#!/usr/bin/env python
import os
import salem
from glob import glob
import pandas as pd
import numpy as np
import wrf
import netCDF4 as nc
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
from IQR_plot import plot_day_clm
# load map settings from wps namelist
fig_map, ax_map = plt.subplots(1, 1)
fpath = '../wrf_input/London-3domains/namelist.wps-london-3'
g, maps = salem.geogrid_simulator(fpath)
maps[0].set_rgb(natural_earth='hr')
maps[0].visualize(ax=ax_map, title='Domains')
# export the figure
fig_map.set_size_inches(6, 6)
fig_map.savefig('fig/map.png')
# load WRF results
fl_WRF = sorted(glob('wrfout*'))
ds = salem.open_mf_wrf_dataset(fl_WRF)
dswrf = nc.Dataset(fl_WRF[0])
x_pos, y_pos = wrf.ll_to_xy(dswrf, latitude=[51.], longitude=[-0.1])
# ds.HFX[:, y_pos, x_pos].time
# ds.HFX[:, y_pos, x_pos].plot.line(add_legend=True)
ds_sel = ds[['HFX', 'LH', 'GRDFLX', 'SWDOWN', 'GLW']].resample(
time='1h', label='left').mean()
# Facet plotting
ds_grp_clm = ds_sel.HFX.load().groupby('time.hour').median(axis=0)
fig_spatial_clm = ds_grp_clm.plot(
x='west_east', y='south_north', col='hour',
col_wrap=4, robust=True).fig
fig_spatial_clm.savefig('figures/QH_map.pdf')
# print ds_sel.resample.__doc__
da_sel = ds_sel.to_array(name='flux')
da_sel_pos = da_sel[:, :, y_pos, x_pos]
df_sel_pos = da_sel_pos.to_pandas().T
# plotting SEB components
da_sel_pos.loc[['HFX', 'LH', 'GRDFLX'], :].plot(x='time', hue='variable')
fig_flx_ts = da_sel_pos.loc[['HFX', 'LH'], :].plot(
x='time', hue='variable')[0].figure
fig_flx_ts.axes[0].set_title('')
fig_flx_ts.tight_layout()
fig_flx_ts.savefig('QH-QE.pdf')
# climatology of diurnal cycles
grp_sel_pos_clm = df_sel_pos[['HFX', 'LH']].groupby(
[df_sel_pos.index.hour.rename('hr'),
df_sel_pos.index.minute.rename('min')])
# id_30min = pd.timedelta_range(start='0. hr', periods=48, freq='30min')
idx = pd.date_range('20140101', '20140101T23:30', periods=48)
#!/usr/bin/python
""" Working through blaze tutorial, nothing to see here """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import glob
import pandas as pd
import blaze as bl
from odo import odo
def blaze_tutorial():
accounts = bl.Symbol('accounts',
'var * {id: int, name: string, amount: int}')
deadbeats = accounts[accounts.amount < 0].name
list_ = [[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]]
print(list(bl.compute(deadbeats, list_)))
df_ = bl.DataFrame(list_, columns=['id', 'name', 'amount'])
print(bl.compute(deadbeats, df_))
bl_df_dir = dir(df_)
df_ = pd.DataFrame(list_, columns=['id', 'name', 'amount'])
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import glob
import os
# %%
home=os.path.dirname(__file__)+"/../"
# %%
df = pd.read_csv(home+'/COVID-19/dati-province/dpc-covid19-ita-province.csv')
provdata = pd.read_csv(home+'/other_info/provinceData.csv')
regdata = pd.read_csv(home+'/other_info/regionData.csv')
# %%
# rename columns
df = df.rename(columns={
'stato': 'Country',
'codice_regione': 'Region Code',
'denominazione_regione': 'Region',
'codice_provincia': 'Province Code',
'denominazione_provincia': 'Province',
'sigla_provincia': 'Province Abbreviation',
'totale_casi': 'Total Cases'
})
df = df.astype({
'Total Cases':'Int32'
})
provdata = provdata.astype({
'Population':'Int32'
})
# %%
df['Last Update'] = pd.to_datetime(df['data'])
df['Date'] = pd.to_datetime(df['data']).dt.floor('D')
# %%
# Previous Total Cases Previous Total Deaths Previous Total Recovered Previous Total Tests
prev = df[['Date','Region','Province','Total Cases']].\
rename(columns={'Total Cases':'Prev Total Cases'})
prev['Date'] = prev['Date']+pd.to_timedelta(1,unit='D')
prev2 = df[['Date','Region','Province','Total Cases']].\
rename(columns={'Total Cases':'Prev2 Total Cases'})
prev2['Date'] = prev2['Date']+pd.to_timedelta(2,unit='D')
prev3 = df[['Date','Region','Province','Total Cases']].\
rename(columns={'Total Cases':'Prev3 Total Cases'})
prev3['Date'] = prev3['Date']+pd.to_timedelta(3,unit='D')
prev7 = df[['Date','Region','Province','Total Cases']].\
rename(columns={'Total Cases':'Prev7 Total Cases'})
prev7['Date'] = prev7['Date']+pd.to_timedelta(7,unit='D')
# -*- coding: utf-8 -*-
"""
Module doc string
"""
import pathlib
import re
import json
from datetime import datetime
import flask
import dash
import dash_table
import matplotlib.colors as mcolors
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import copy
import numpy as np
from precomputing import add_stopwords
from dash.dependencies import Output, Input, State
from dateutil import relativedelta
from wordcloud import WordCloud, STOPWORDS
from ldacomplaints import lda_analysis
from sklearn.manifold import TSNE
import pandas as pd
layoutt = dict(
autosize=True,
height=800,
barmode='stack',
template="plotly_white",
font=dict(color='#000000'),
titlefont=dict(color='#000000', size='14'),
margin=dict(
l=35,
r=35,
b=35,
t=45
),
hovermode="closest",
# plot_bgcolor="#191A1A",
# paper_bgcolor="#020202",
legend=dict(font=dict(size=10), orientation='h'),
title='Number of fake news vs new COVID cases (thousands)',
)
data_df = pd.read_csv('new_cases.csv')
import os, sys, re, json
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from tqdm import tqdm
import sklearn.metrics as metrics
import warnings
warnings.filterwarnings('ignore')
"""""""""""""""""""""""""""""""""""""""""
# Encoding and Build Dataset
"""""""""""""""""""""""""""""""""""""""""
def BLOSUM62Encoder(seq: str, length: int, padding: bool) -> np.array:
dict_map = {
"A" : [4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4],
"R" : [-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4],
"N" : [-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4],
"D" : [-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4],
"C" : [0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4],
"Q" : [-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4],
"E" : [-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4],
"G" : [0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4],
"H" : [-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4],
"I" : [-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4],
"L" : [-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4],
"K" : [-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4],
"M" : [-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4],
"F" : [-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4],
"P" : [-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4],
"S" : [1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4],
"T" : [0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4],
"W" : [-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4],
"Y" : [-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4],
"V" : [0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4],
"B" : [-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4],
"Z" : [-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4],
"X" : [0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4],
"." : [-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1],
"U" : [-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1]
}
if padding == False:
length = len(seq)
arr = np.tile(np.array(dict_map["."]), (length, 1)).T
for idx in range(len(seq)):
arr[:, idx] = dict_map[seq[idx]]
return arr
def OneHotEncoder(seq: str, length: int, padding: bool) -> np.array:
dict_map = {
'A': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'C': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'D': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'E': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'F': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'G': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'H': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'I': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'K': [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'L': [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'M': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'N': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'P': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'Q': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'R': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'S': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'T': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'V': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'W': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'Y': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
'.': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
}
if padding == False:
length = len(seq)
arr = np.zeros((21, length))
for idx in range(length):
try:
arr[:, idx] = dict_map[seq[idx]]
except:
arr[:, idx] = dict_map["."]
return arr
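# Illustrative shape check (added note, not in the original source; the peptide
# "ACDE" is a made-up example): with padding to a fixed length, BLOSUM62Encoder
# yields a 24 x length matrix (24 BLOSUM62 columns) and OneHotEncoder a 21 x length
# matrix, with unused positions filled by the "." padding row.
# >>> BLOSUM62Encoder("ACDE", 15, True).shape   # (24, 15)
# >>> OneHotEncoder("ACDE", 15, True).shape     # (21, 15)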
def BuildDataset(df, encoding_method, epitope_length, with_label=True):
x_list, y_list = list(), list()
for idx, row in tqdm(df.iterrows(), total=df.shape[0], leave=False):
# epitope encoding
if encoding_method == "onehot":
epitope_encode = OneHotEncoder(row.sequence, epitope_length, True)
elif encoding_method == "blosum":
epitope_encode = BLOSUM62Encoder(row.sequence, epitope_length, True)
else:
print("wrong epitope encoding method")
return None
# x = epitope_encode
x_list.append(epitope_encode)
# y = [idx, classification_value, regression_value]
if with_label:
try:
y_list.append([idx, row.bind, row.value])
except:
y_list.append([idx, row.bind])
else:
y_list.append([idx])
x_tensor = torch.FloatTensor(x_list)
y_tensor = torch.FloatTensor(y_list)
dataset = torch.utils.data.TensorDataset(x_tensor, y_tensor)
return dataset
"""""""""""""""""""""""""""""""""""""""""
# Data for Training
"""""""""""""""""""""""""""""""""""""""""
class Data():
def __init__(self, df_file, dataset_file, batch_size, shuffle, decoy_times=None):
self.df_file = df_file
self.dataset_file = dataset_file
# list for [hit, decoy]
if type(self.df_file) == list:
df_list = list()
for f in self.df_file:
df_list.append(pd.read_csv(f, index_col=0))
self.df = pd.concat(df_list)
else:
self.df = pd.read_csv(self.df_file, index_col=0)
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator
def modified_fillna(arr, fillers):
ser = pd.Series(arr)
filler = np.random.choice(fillers, ser.size)
return np.where(ser.isnull(), filler, ser.values)
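# Illustrative behaviour of modified_fillna (added example, values made up):
# each NaN is replaced by a value drawn at random from `fillers`; other entries
# pass through unchanged.
# >>> modified_fillna(np.array([1.0, np.nan, 3.0]), fillers=[0.0])
# array([1., 0., 3.])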
class MelaClassifier(BaseEstimator):
def __init__(self, weights, low_lim, up_lim):
self.var = None
self.low_lim = low_lim
self.up_lim = up_lim
self.weights = np.array(weights)
def preprocess(self, data):
data = pd.DataFrame(data)
import pandas as pd
import numpy as np
import datetime as dt
# read in data
stop_times = pd.read_csv('apcdata_year.filtered.csv')
# ==============================================================================
# purpose: follow along to the 10 minute tutorial on pandas:
# http://pandas.pydata.org/pandas-docs/version/0.15.2/10min.html
# author: <NAME>
# created: 12/4/15
# revised:
# comments:
#==============================================================================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
s = pd.Series([1, 2, 3, np.nan, np.nan, 7])
# This function to classify the data in real time
import argparse
import pandas as pd
import numpy as np
import os
import random
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from scipy import stats
import pathlib
import DS_pkt_near_realtime_run_ml as run_ml
import settings
def read_data(dataset_dir):
df_list = []
video_list = os.listdir(dataset_dir)
for video in video_list:
if video == '.DS_Store':
continue
video_path = pathlib.Path(str(dataset_dir) + '/' + video)
t_df = pd.read_csv(video_path)
if len(t_df) > 0:
df_list.append(t_df)
return df_list
def split_to_train_test(input_path, random_seed, platform):
print('split for xgboost')
loc_1 = 1
bw_nc = 0
loc = loc_1
bw = bw_nc
dataset_list = os.listdir(input_path)
# read all the data under given dataset
df_list = []
for dataset_ind, dataset in enumerate(dataset_list):
if dataset == '.DS_Store':
continue
conditions = dataset.split('_')
if int(conditions[1]) == loc and int(conditions[3]) == bw:
condition_path = input_path + '/' + dataset
temp_df_list = read_data(condition_path)
# select for the given platform
for i in range(len(temp_df_list)):
if temp_df_list[i].loc[0, 'Vid_pltform'] == platform:
df_list.append(temp_df_list[i])
# split to train and test dataset
train_list = []
test_list = []
all_videos = np.arange(0, 51, 1)
np.random.seed(random_seed)
test_ind = np.random.choice(all_videos, size=15, replace=False)
train_ind = list(set(list(all_videos)) - set(list(test_ind)))
test_ind = list(test_ind)
for df in df_list:
if df.loc[0, 'Vid_num'] in test_ind:
test_list.append(df)
elif df.loc[0, 'Vid_num'] in train_ind:
train_list.append(df)
# train_list = [j for sub in train_list for j in sub]
# test_list = [j for sub in test_list for j in sub]
random.Random(random_seed).shuffle(train_list)
random.Random(random_seed).shuffle(test_list)
train_df = pd.concat(train_list, axis=0)
test_df = pd.concat(test_list, axis=0)
return train_df, test_df, train_list, test_list
# train and test xgboost models
def train_xgboost_model(train_df, test_df, train_list, test_list):
print('run_xgboost')
feature_removed = [
'Unnamed: 0',
'index1',
'Vid_pltform',
'Vid_pltform_ml',
'Vid_begin',
'Vid_end',
'loc',
'time',
'bandwidth',
'Vid_num',
]
train_df.drop(feature_removed, inplace=True, axis=1)
test_df.drop(feature_removed, inplace=True, axis=1)
# print(train_df.dtypes)
final_df = run_ml.run_xgboost(train_df, test_df, train_list, test_list)
return final_df
# split the data for naive bayes
def train_test_split(xgboost_pred, random_seed):
all_videos = np.arange(0, 51, 1)
np.random.seed(random_seed)
test_ind = np.random.choice(all_videos, size=15, replace=False)
train_ind = list(set(list(all_videos)) - set(list(test_ind)))
test_ind = list(test_ind)
train_df = xgboost_pred.loc[xgboost_pred['Vid_num'].isin(list(train_ind))]
test_df = xgboost_pred.loc[xgboost_pred['Vid_num'].isin(list(test_ind))]
train_df.to_csv(
'/Users/ckat9988/Documents/Research/Passive_analaysis/Analysis/Experiments/temp/train_csv_naive.csv')
test_df.to_csv(
'/Users/ckat9988/Documents/Research/Passive_analaysis/Analysis/Experiments/temp/test_csv_naive.csv')
return train_df, test_df
# get the raw bin data.
def get_raw_bin_acc(xgboost_data, time_point):
gt = xgboost_data.loc[:, 'Vid_type'].values
predictions = xgboost_data.loc[:, 'bin_1':'bin_' + str(time_point)].values
mode_predictions = stats.mode(predictions, axis=1)[0]
acc_mode = accuracy_score(gt, mode_predictions)
prec_mode = precision_score(gt, mode_predictions)
rec_mode = recall_score(gt, mode_predictions)
f1_mode = f1_score(gt, mode_predictions)
mode_performance = [acc_mode, prec_mode, rec_mode, f1_mode]
return mode_performance
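# Added note: the per-bin predictions are collapsed row-wise with the mode, e.g.
# bins [1, 1, 0] for one sample -> final prediction 1, which is then scored
# against Vid_type with accuracy/precision/recall/F1.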
def store_data(platform, data, outpath):
if platform == settings.PLATFORM_YT:
write_folder = 'yt'
elif platform == settings.PLATFORM_FB:
write_folder = 'fb'
else:
write_folder = 'both'
t = np.arange(10, 121, 5).reshape([-1, 1])
cols = ['time', 'acc', 'prec', 'recall', 'f1']
metric_data = np.asarray(data)
df_data = np.concatenate([t, metric_data], axis=1)
df = pd.DataFrame(columns=cols,
data=df_data)
df.to_csv(outpath + '/' + write_folder + '.csv', index=False)
return
def read_xgboost_data(read_path, read_folder):
df_list = []
for p in read_folder:
path_in = read_path + '/' + p
df = pd.read_csv(path_in + '.csv')
#!/usr/bin/env python
# encoding: utf-8
# This file is part of CycloneDX Python module.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) <NAME>. All Rights Reserved.
# Copyright (c) 2020 <NAME>. All Rights Reserved.
# the main reason for the change here is to generate an importable bom.xml for dependency-track
import argparse
import pandas as pd
import re
import xmlschema
from cyclonedxbuildroot import BomGenerator
from cyclonedxbuildroot import BomValidator
#get copyright
import debmake
import shutil
import requests
import urllib3
import os
http = urllib3.PoolManager()
import urllib, json
import pypandoc
import xmltodict
def get_json_from_url(url):
response = urllib.request.urlopen(url)
return json.loads(response.read())
def print_list(list):
for item in list:
print(item)
def sanitized_license_name(license):
#license name in buildroot is not well formatted for spdx.
sanitized_license = re.sub(r'\(.*?\)', '', license)
sanitized_license = sanitized_license.split(' or')[0]
sanitized_license = sanitized_license.split(' ')[0]
sanitized_license = sanitized_license.split(',')[0]
return sanitized_license
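# Illustrative call (hypothetical input string, added for clarity):
# >>> sanitized_license_name('GPL-2.0+ or MIT (with exceptions)')
# 'GPL-2.0+'
# parenthesised qualifiers are stripped first, then everything after the first
# " or", space or comma is dropped.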
def read_openwrt_package_file(filepath):
component = {}
current_package_name = None
with open(filepath) as fp:
line = fp.readline()
while line:
if "Package: " == line[:9]:
current_package_name = line[9:].rstrip()
#print('package_name: (%s)'%current_package_name)
component[current_package_name] = {}
component[current_package_name]['version'] = 'openwrt'
component[current_package_name]['license'] = 'Missing license!'
component[current_package_name]['copyright'] = ''
component[current_package_name]['pkg_tar_ball_name'] = ''
if "Version: " == line[:9]:
component[current_package_name]['version'] = line[9:].rstrip()
if "License: " == line[:9]:
component[current_package_name]['license'] = line[9:].rstrip()
if "Maintainer: " == line[:12]:
component[current_package_name]['copyright'] = line[12:].rstrip()
if "Source: " == line[:8]:
component[current_package_name]['pkg_tar_ball_name'] = line[8:].rstrip()
if "@@" == line.rstrip():
current_package_name = None
line = fp.readline()
print("package_number : %d"%len(component))
return component
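# Sketch of the packageinfo entry format this parser expects, inferred from the
# field checks above (package name and values are made up):
#   Package: libfoo
#   Version: 1.2.3-1
#   License: MIT
#   Maintainer: Jane Doe
#   Source: libfoo-1.2.3.tar.gz
#   @@
# "@@" separates entries; fields that are absent keep the defaults set when the
# "Package:" line is seen.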
#where to store these two lists to be discussed!
#better add a comment in it. to be discussed!
# these two lists shall be downloaded from git.
#verified_package_list = [{ 'name': 'audit', 'version' : '2.8.4', 'approved': False }, { 'name': 'libcap-ng', 'version' : '0.7.9', 'approved': True }]
#white list to be discussed!
"""
whitelist_license = ['Apache-2.0',\
'BSD-2-Clause',\
'BSD-2-Clause-FreeBSD',\
'BSD-3-Clause',\
'0BSD',\
'ISC',\
'MIT',\
'X11',\
'GPL-2.0',\
'GPL-2.0+',\
'LGPL-2.1',\
'LGPL-2.1+',\
'GPL-2.0-only',\
'GPL-2.0-or-later',\
'LGPL-2.1-only',\
'LGPL-2.1-or-later']
"""
verified_package_list = get_json_from_url("https://raw.githubusercontent.com/alvinchchen/verified_package_list/master/verified_package_list.json")
whitelist_license = get_json_from_url("https://github.com/alvinchchen/verified_package_list/raw/master/whitelist.json")
openwrt_package_info = read_openwrt_package_file("packageinfo")
def check_verified_package_list(name, version):
for pkg in verified_package_list:
if name == pkg['name'] and version ==pkg['version']:
return pkg
return None
def get_copyright(download_url, tarball_name):
"""Read copyright data from file."""
copyright = {}
download_ok = True
tmp_dir = './tmp/'
report = ''
try:
shutil.rmtree(tmp_dir)
except:
pass
os.mkdir(tmp_dir)
tmp_source_dir = './tmp/src/'
os.mkdir(tmp_source_dir)
current_working_dir = os.getcwd()
local_download_path = './' + tarball_name
download_url = download_url + "/" + tarball_name
if not os.path.isfile(tarball_name):
print('no tarball')
os.system('wget '+ download_url)
if os.path.isfile(local_download_path):
os.system('tar xvf '+ local_download_path + ' -C ' + tmp_source_dir)
else:
download_ok = False
#scan copyright
os.chdir(tmp_source_dir)
(nonlink_files, xml_html_files, binary_files, huge_files, counter, count_list) = debmake.scanfiles.scanfiles()
data = debmake.checkdep5.checkdep5(nonlink_files, mode=3, pedantic=1)
for (licenseid, licensetext, files, copyright_lines) in data:
copyright_line = copyright_lines[11:]
if '__NO_COPYRIGHT__' not in copyright_line \
and '__INITIAL_' not in copyright_line \
and '__NO_COPYRIGHT_NOR_LICENSE__' not in copyright_line \
and '__SHORT_LINE__' not in copyright_line \
and '__LONG_LINE__' not in copyright_line \
and '__MANY_NON_ASCII__' not in copyright_line :
copyright_string_list = copyright_line.split('\n')
for item in copyright_string_list:
string = item.lstrip().rstrip().replace('\\n', '')
if len(string):
copyright[string] = 1
os.chdir(current_working_dir)
shutil.rmtree(tmp_dir)
copyright_text = []
index = 1
for i in copyright.keys():
copyright_text.append('(%d) %s'%(index, i))
index = index + 1
print(copyright_text)
return copyright_text, download_ok
def get_url_license(license_url):
"""Read url_license from cyclonedx """
tmp_dir = './tmp/'
report = ''
try:
shutil.rmtree(tmp_dir)
except:
pass
os.mkdir(tmp_dir)
tmp_source_dir = './tmp/src/'
os.mkdir(tmp_source_dir)
current_working_dir = os.getcwd()
#scan copyright
os.chdir(tmp_source_dir)
os.system('wget '+ license_url)
(nonlink_files, xml_html_files, binary_files, huge_files, counter, count_list) = debmake.scanfiles.scanfiles()
print(nonlink_files)
print(xml_html_files)
if len(nonlink_files) != 0:
data = debmake.checkdep5.checkdep5(nonlink_files, mode=3, pedantic=1)
else:
data = debmake.checkdep5.checkdep5(xml_html_files, mode=3, pedantic=1)
result = license_url
for (licenseid, licensetext, files, copyright_lines) in data:
print(licenseid)
print(files)
if licenseid == 'Expat':
licenseid = 'MIT'
result = licenseid
os.chdir(current_working_dir)
shutil.rmtree(tmp_dir)
return result
def build_cyclonedx_component(component):
publisher = component['publisher']
pkg_name = component['pkg_name']
version = component['version']
purl = component['purl']
license = component['license']
hashes = component['hashes']
modified = component['modified']
copyright = ""
for item in component['copyright']:
copyright += item
description = component['description']
return BomGenerator.build_component_element(publisher, pkg_name, version, description, hashes, license, purl, modified, copyright)
def openwrt_manifest_to_component(input_file):
"""Read BOM data from file path."""
component_elements = []
lines = []
with open(input_file) as f:
lines = f.readlines()
print(f'package number : {len(lines)}')
for i in range(0, len(lines)):
pkg_name = lines[i].split(':')[0]
license_name = lines[i].split(':')[1].lstrip().rstrip()
if 'Missing license!' in license_name:
license_name = 'Missing license!'
component = {}
component['publisher'] = ''
component['pkg_name'] = pkg_name
component['pkg_tar_ball_name'] = openwrt_package_info[pkg_name]['pkg_tar_ball_name']
component['download_url'] = ''
component['version'] = openwrt_package_info[pkg_name]['version']
component['purl'] = 'pkg:fedora/' + component['pkg_name'] + '@' + component['version']
component['license'] = openwrt_package_info[pkg_name]['license']
component['hashes'] = []
component['modified'] = 'false'
component['copyright'] = openwrt_package_info[pkg_name]['copyright']
component['description'] = ''
component['download_fail'] = False
component_elements.append(component)
return component_elements
def buildroot_manifest_to_component(input_file):
"""Read BOM data from file path."""
component_elements = []
if input_file.split('.')[1] == 'csv':
sheetX = pd.read_csv(input_file)
else:
#xslx
xls = pd.ExcelFile(input_file)
import re
import time
from pathlib import Path
from tarfile import TarFile
from timeit import default_timer as timer
from typing import *
from zipfile import ZipFile
import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm
from smseventlog import delta, dt
from smseventlog import errors as er
from smseventlog import eventfolders as efl
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog.data.internal import faults as flt
from smseventlog.data.internal import plm
from smseventlog.data.internal import utils as utl
from smseventlog.database import db
from smseventlog.utils import fileops as fl
log = getlog(__name__)
ahs_files = ['data', 'dnevent', 'sfevent']
def import_dls(p: Path, mw=None) -> dict:
"""Upload downloads folder from local computer to p-drive
p : Path
filepath to process
mw : gui.gui.MainWindow
mw object to update statusbar with progress
Returns
-------
dict
dict of result times
Import csvs to database:
faults
plm
Zip:
dsc folder (ge files)
Attempt to get unit from:
- file name
- dsc stats file
- fault csv
- plm csv
- TODO check selected dir contains some correct files (eg not accidental selection)
"""
start = time.time()
now = lambda x: time.time() - x
# check if unit given in file name
unit = utl.unit_from_str(s=p.name)
d = f.date_from_str(s=p.name)
d_lower = dt.now() + delta(days=-365 * 2)
m_result = {k: dict(num=0, time=0) for k in ('ge_zip', 'fault', 'plm')}
# list of dates created as backup if no dsc
lst_dates = [fl.date_created(p) for p in p.iterdir()]
# callback to update statusbar
if mw is None:
from smseventlog.gui._global import update_statusbar as us
else:
us = mw.update_statusbar
# find dsc files to use for stat file first
lst_dsc = utl.FolderSearch('dsc', d_lower=d_lower).search(p)
if lst_dsc:
lst_dates = [] # use dsc for date, clear backup dates
# try to get unit from first dsc serial file first
try:
p_stat = stats_from_dsc(p=lst_dsc[0])
if unit is None:
print('p_stat', p_stat)
unit = unit_from_stat(p_stat)
except Exception as e:
# print(e)
log.warning('Failed to get unit from stats file.')
# save files to import after unit check
m_import = {}
unit_func = dict(
fault=flt.unit_from_fault,
plm=plm.unit_from_haulcycle)
# check unit from fault/plm
for ftype in unit_func.keys():
try:
lst_csv = utl.FolderSearch(ftype, d_lower=d_lower).search(p)
if lst_csv:
m_import[ftype] = lst_csv
# try to get unit if doesn't exist yet
if unit is None:
unit = unit_func.get(ftype)(p=lst_csv[0], raise_errors=False)
except Exception as e:
# print(e)
us(msg=f'Failed to read {ftype} file(s).', warn=True, log_=True)
# get dates from ge dsc
for p_dsc in lst_dsc:
lst_dates.append(date_from_dsc(p_dsc))
# check for AHS files in first level of dls folder
ahs_folders = utl.FolderSearch('ahs', max_depth=0).search(p)
if not ahs_folders:
suffix = 'DLS'
else:
suffix = 'FRDLS'
if unit is None:
unit = val_from_ahs_files(ahs_folders, 'unit')
# get date from ahs files
if d is None:
lst_dates.append(val_from_ahs_files(ahs_folders, 'date'))
# final check, fail if unit doesn't exist yet
if unit is None:
raise er.NoUnitError()
# sort dates and set date if not given in folder name
if d is None and lst_dates:
lst_dates = sorted(lst_dates, reverse=False)
d = lst_dates[0]
if d is None:
raise er.NoDateError()
name = f'{unit} - {d:%Y-%m-%d}'
title = f'{name} - {suffix}'
m_result['name'] = name
from smseventlog.eventfolders import UnitFolder
uf = UnitFolder(unit=unit)
p_dst = uf.p_dls / f'{d.year}/{title}'
# make sure we don't overwrite folder
log.info(f'p_dst: {p_dst}')
if p_dst.exists():
raise er.FolderExistsError(p=p_dst)
# import fault/plm
for ftype, lst_csv in m_import.items():
time_prev = time.time()
# log.info(f'importing: {ftype}')
try:
rowsadded = utl.combine_import_csvs(lst_csv=lst_csv, ftype=ftype, unit=unit, n_jobs=-4)
m_result[ftype] = dict(num=rowsadded or 0, time=now(time_prev))
except Exception as e:
# NOTE could maybe raise a custom exception here?
us(msg=f'Failed to import {ftype} files.', warn=True, log_=True)
# zip GE dsc files
if lst_dsc:
time_prev = time.time()
for p_dsc in lst_dsc:
# log.info(f'zipping: {p_dsc}')
fl.zip_folder_threadsafe(p_src=p_dsc, p_dst=p_dst / p_dsc.name, delete=True)
m_result['ge_zip'] = dict(num=len(lst_dsc), time=now(time_prev))
# zip dnevent/sfevent folders in place
if ahs_folders:
time_prev = time.time()
# copy 6 newest files > 3mb to PREVIEW dir
make_ahs_data_preview(ahs_folders)
for p_ahs in ahs_folders:
# if any(item in p_ahs.name.lower() for item in ('dnevent', 'sfevent')):
fl.zip_folder_threadsafe(p_src=p_ahs, p_dst=p_dst / p_ahs.name, delete=True)
m_result['ahs_zip'] = dict(num=len(ahs_folders), time=now(time_prev))
# upload all to p-drive
us(f'Uploading files to: {p_dst}')
fl.move_folder(p_src=p, p_dst=p_dst)
m_result['time_total'] = now(start)
return m_result
def make_ahs_data_preview(
ahs_folders: List[Path],
p_dst: Path = None,
n_newest: int = 6) -> None:
"""Extract x newest data files > 3mb, copy to separate DATA_PREVIEW dir"""
p_data = [p for p in ahs_folders if p.name.lower() == 'data']
if not p_data:
return
p_data = p_data[0]
min_size = 3e6 # 3mb
lst = []
if p_dst is None:
p_dst = p_data.parent
p_dst = p_dst / 'DATA_PREVIEW'
# loop newest files, collect those > 3mb
for p in sorted(p_data.glob('*.gz*'), reverse=True):
if p.stat().st_size > min_size:
lst.append(p)
if len(lst) >= n_newest:
break
# move files to DATA_PREVIEW dir
for p in lst:
fl.copy_file(p_src=p, p_dst=p_dst / p.name)
def val_from_ahs_files(ahs_folders: List[Path], type_: str) -> Union[str, None]:
"""Get unit number/date from list of ahs FR folders
Parameters
----------
ahs_folders : List[Path]
[data, dnevent, sfevent]
type_ : str
unit | date
Returns
-------
Union[str, None]
unit/date or None
"""
val = None
expr = r'gz$|txt$'
if not type_ in ('unit', 'date'):
raise ValueError(f'type_ must be unit|date, not "{type_}"')
for p_ahs in ahs_folders:
if val is None:
for p2 in sorted(list(p_ahs.iterdir()), reverse=True):
if re.search(expr, p2.name.lower()):
if type_ == 'unit':
temp = p2.name.split('_')[0]
if db.unit_exists(temp):
val = temp
elif type_ == 'date':
# get date as 6 digit date YYMMDD
val = re.search(r'\d{6}', p2.name)[0]
val = dt.strptime(val, '%y%m%d')
break
return val
def is_year(name: str) -> bool:
"""Check if passed in string is a 4 digit year, eg '2020'
Parameters
----------
name : str
String to check
Returns
-------
bool
"""
exp = re.compile('^[2][0-9]{3}$')
ans = re.search(exp, name)
return not ans is None
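# Illustrative results of the regex above (added examples):
# is_year('2020') -> True; is_year('1999') -> False (must start with '2');
# is_year('202') -> False.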
@er.errlog(msg='Couldn\'t find recent dls folder.', err=False)
def get_recent_dls_unit(unit: str) -> Path:
"""Get most recent dls folder for single unit
Parameters
----------
unit : str
Returns
-------
Path
Path to most recent dls folder
"""
p_unit = efl.UnitFolder(unit=unit).p_unit
p_dls = p_unit / 'Downloads'
if not p_dls.exists():
log.warning(f'Download folder doesn\'t exist: {p_dls}')
return
# get all downloads/year folders
lst_year = [p for p in p_dls.iterdir() if p.is_dir() and is_year(p.name)]
if not lst_year:
log.warning('No download year folders found.')
return
# sort year folders by name, newest first, select first
lst_year_sorted = sorted(lst_year, key=lambda p: p.name, reverse=True) # sort by year
p_year = lst_year_sorted[0]
# sort all dls folders on date from folder title
lst_dls = [p for p in p_year.iterdir() if p.is_dir()]
lst_dls_sorted = sorted(filter(lambda p: f.date_from_str(p.name) is not None, lst_dls),
key=lambda p: f.date_from_str(p.name), reverse=True)
return lst_dls_sorted[0]
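# Assumed folder layout, inferred from the code above and from import_dls()
# (unit 'F301' and the date are made-up examples):
#   <unit folder>/Downloads/2021/F301 - 2021-03-15 - DLS
# The newest year folder is picked first, then dls folders are sorted by the date
# parsed from their names and the most recent one is returned.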
def zip_recent_dls_unit(unit: str, _zip=True) -> Path:
"""Func for gui to find (optional zip) most recent dls folder by parsing date in folder title"""
from ...gui import _global as gbl
from ...gui.dialogs import msg_simple, msgbox
p_dls = get_recent_dls_unit(unit=unit)
if not p_dls is None:
msg = f'Found DLS folder: {p_dls.name}, calculating size...'
gbl.update_statusbar(msg)
gbl.get_mainwindow().app.processEvents()
size = fl.calc_size(p_dls)
msg = f'Found DLS folder:\n\n{p_dls.name}\n{size}\n\nZip now?'
if not msgbox(msg=msg, yesno=True):
return
else:
msg = 'Couldn\'t find recent DLS folder, check folder structure for issues.'
msg_simple(msg=msg, icon='warning')
return
if _zip:
p_zip = fl.zip_folder_threadsafe(p_src=p_dls, delete=False)
return p_zip
else:
return p_dls
def fix_dsc(p: Path) -> None:
"""Process/fix single dsc/dls folder"""
# log.info(f'fix_dsc: {p}')
start = timer()
unit = utl.unit_from_path(p)
uf = efl.UnitFolder(unit=unit)
p_parent = p.parent
d = date_from_dsc(p=p)
# rename dls folder: UUU - YYYY-MM-DD - DLS
newname = f'{unit} - {d:%Y-%m-%d} - DLS'
p_new = uf.p_dls / f'{d.year}/{newname}'
# need to make sure there is only one _dsc_ folder in path
# make sure dsc isn't within 2 levels of 'Downloads' folder
dsccount = sum(1 for _ in p_parent.glob('*dsc*'))
if dsccount > 1 or check_parents(p=p, depth=2, names=['downloads']):
# just move dsc folder, not parent and contents
p_src = p
p_dst = p_new / p.name
else:
p_src = p_parent # folder above _dsc_
p_dst = p_new
# zip and move dsc folder, then move anything else remaining in the parent dir
is_zip = p.suffix in ('.zip', '.tar')
is_same_folder = p_src == p_dst
if not is_same_folder or not is_zip:
msg = ''
for n, _p in dict(orig=p, src=p_src, dst=p_dst).items():
msg += f'\n\t\t{n:<4}: {_p}'
log.info(f'fix_dsc:{msg}')
try:
if not is_zip:
p_zip = fl.zip_folder_threadsafe(
p_src=p,
p_dst=p_new / p.name,
delete=True)
if not is_same_folder:
fl.move_folder(p_src=p_src, p_dst=p_dst)
except Exception as e:
log.warning(f'Error fixing dsc folder: {str(p_src)}')
raise e
log.info(f'Elapsed time: {f.deltasec(start, timer())}s')
def fix_dls_all_units(d_lower: dt = None) -> None:
if d_lower is None:
d_lower = dt.now() + delta(days=-30)
units = utl.all_units()
# collect dsc files from all units in parallel
result = Parallel(n_jobs=-1, verbose=11)(delayed(utl.process_files)(
ftype='dsc',
units=unit,
d_lower=d_lower,
parallel=False) for unit in units)
# fix them
def date_from_dsc(p: Path) -> dt:
"""Parse date from dsc folder name, eg 328_dsc_20180526-072028
- if no dsc, use date created"""
try:
sdate = p.name.split('_dsc_')[-1].split('-')[0]
d = dt.strptime(sdate, '%Y%m%d')
except Exception:
d = fl.date_created(p)
return d
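# Example using the naming pattern from the docstring:
# '328_dsc_20180526-072028' -> datetime(2018, 5, 26); names that don't parse fall
# back to the folder's creation date.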
def get_recent_dsc_single(
unit: str,
d_lower: dt = dt(2020, 1, 1),
year: str = None,
all_files: bool = False,
ftype: str = 'dsc',
max_depth: int = 3):
"""Return list of most recent dsc folder from each unit
- OR most recent fault... could extend this for any filetype
Parameters
----------
d_lower : datetime, optional,
limit search by date, default dt(2020,1,1)
unit : str, optional
all_files: bool
return dict of unit: list of all sorted files
Returns
-------
list | dict
"""
lst = []
uf = efl.UnitFolder(unit=unit)
p_dls = uf.p_dls
if not year is None:
p_year = p_dls / year
if p_year.exists():
p_dls = p_year
lst_unit = utl.FolderSearch(ftype, d_lower=d_lower, max_depth=max_depth).search(p_dls)
if lst_unit:
lst_unit.sort(key=lambda p: date_from_dsc(p), reverse=True)
if not all_files:
lst.append(lst_unit[0])
else:
lst.extend(lst_unit)
return lst
def get_recent_dsc_all(minesite='FortHills', model='980E', all_files=True, **kw):
"""Return list of most recent dsc folders for all units"""
lst = []
# keep all files to try and import next most recent if file fails
if all_files:
lst = {}
units = db.unique_units(minesite=minesite, model=model)
for unit in tqdm(units):
recent_dsc = get_recent_dsc_single(unit=unit, all_files=all_files, **kw)
if not recent_dsc:
print(f'\n\nNo recent dsc for: {unit}')
if not all_files:
lst.extend(recent_dsc)
else:
lst[unit] = recent_dsc
return lst
def move_tr3(p):
unit = utl.unit_from_path(p) # assuming in unit folder
p_dst_base = Path('/Users/Jayme/OneDrive/SMS Equipment/Share/tr3 export')
p_dst = p_dst_base / f'{unit}/{p.name}'
fl.copy_file(p_src=p, p_dst=p_dst)
def check_parents(p: Path, depth: int, names: list) -> bool:
"""Check path to make sure parents aren't top level folders
Parameters
----------
p : Path
Path to check\n
depth : int
From start of folder path to this folder level\n
names : list
Names to check
Returns
-------
bool
If path checked is top level folder
"""
names = [n.lower() for n in names]
for parent in list(p.parents)[:depth]:
if parent.name.lower() in names:
return True
return False
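# Illustrative call (hypothetical path, added for clarity):
# check_parents(Path('/a/Downloads/x/y'), depth=2, names=['downloads']) -> True,
# because 'Downloads' appears within the two nearest parent folders of the path.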
def zip_recent_dls(units, d_lower=dt(2020, 1, 1)):
# get most recent dsc from list of units and zip parent folder for attaching to TSI
if not isinstance(units, list):
units = [units]
lst = []
for unit in units:
lst.extend(get_recent_dsc_single(unit=unit, d_lower=d_lower))
lst_zip = [fl.zip_folder_threadsafe(p_src=p.parent, delete=False) for p in lst]
return lst_zip
# STATS csv
def stats_from_dsc(p):
"""Get stats file path from dsc path"""
if p.is_dir():
try:
return list((p / 'stats').glob('SERIAL*csv'))[0]
except Exception:
print(f'Couldn\'t read stats: {p}')
return None
elif p.suffix == '.zip':
return ZipFile(p)
elif p.suffix == '.tar':
return TarFile(p)
def import_stats(lst=None, d_lower=dt(2021, 1, 1)):
"""Use list of most recent dsc and combine into dataframe"""
if lst is None:
lst = get_recent_dsc_all(d_lower=d_lower)
if isinstance(lst, dict):
dfs = []
for unit, lst_csv in tqdm(lst.items()):
# try to find/load csv, or move to next if fail
for p in lst_csv:
try:
p_csv = stats_from_dsc(p)
df_single = get_stats(p=p_csv)
dfs.append(df_single)
break
except Exception as e:
log.warning(f'Failed to load csv: {p}, \n{str(e)}')
df = pd.concat(dfs)
else:
df = pd.concat([get_stats(stats_from_dsc(p)) for p in lst])
return df
def get_list_stats(unit):
"""Return list of STATS csvs for specific unit"""
from ...eventfolders import UnitFolder
uf = UnitFolder(unit=unit)
p_dls = uf.p_dls
return p_dls.glob('SERIAL*csv')
def smr_from_stats(lst):
return pd.concat([get_stats(p) for p in lst])
def unit_from_stat(p: Path) -> Union[str, None]:
"""Try to get unit from stats file
Parameters
----------
p : Path
Returns
-------
Union[str, None]
unit if exists else None
"""
df = get_stats(p=p)
unit = df.index[0]
if not unit == 'TEMP':
return unit
def get_stats(p, all_cols=False):
"""
Read stats csv and convert to single row df of timestamp, psc/tsc versions + inv SNs, to be combined
Can read zip or tarfiles"""
# dsc folder could be zipped, just read zipped csv, easy!
# super not dry
# print(p)
if isinstance(p, ZipFile):
zf = p
p = Path(zf.filename)
csv = [str(file.filename) for file in zf.filelist if re.search(
r'serial.*csv', str(file), flags=re.IGNORECASE)][0]
with zf.open(csv) as reader:
df = pd.read_csv(reader, index_col=0)
elif isinstance(p, TarFile):
tf = p
p = Path(tf.name)
csv = [file for file in tf.getnames() if re.search(r'serial.*csv', file, flags=re.IGNORECASE)][0]
df = pd.read_csv(tf.extractfile(csv), index_col=0)
else:
df = pd.read_csv(p, index_col=0)
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
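# Added example: for a 2-d object, _axify(df, [0, 1], 1) -> (slice(None), [0, 1]),
# i.e. the key is placed in the requested axis slot and every other axis is left
# as a full slice (df here is any DataFrame; the call is illustrative).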
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
        # these now work rather than raising a KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
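        # unlike positional slices, label slices include both endpoints, so
        # 6:8 selects the level-0 labels 6, 7 and 8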
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
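        # .loc label slices are inclusive, so "A":"B" keeps column 'B'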
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
        # empty (essentially no-ops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
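        # durations in whole seconds, e.g. first row:
        # 5/25/2015 11:22 - 10:59 = 23 min = 1380 s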
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
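        # the rhs is relabelled to index [0, 1], so .loc aligns it by label
        # and the original rows 1-2 end up on rows 0-1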
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
        # the length of the indexer vs the length of the 1d ndarray being set
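        # the rhs length must match the selection: .ix[2:5] is label-based and
        # inclusive (4 rows) but gets 3 values, while df.index[2:5] is
        # positional (3 rows) but gets 4 values - both mismatches should raise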
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
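        # assigning an incompatible value should upcast only the target
        # column; the untouched integer columns keep their dtype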
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
        # non-unique index with a non-unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
        # the original report: looking at df, element [0, 2] showed '_', while
        # df.ix[idx, 'test'] showed '-----' and df.iloc[0, 2] showed '_' again.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
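        # .iloc is purely positional: a boolean Series would need label
        # alignment and is rejected, while a plain boolean ndarray of the
        # right length is accepted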
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because we are non-monotonic
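        # with duplicate labels and no monotonic ordering the slice bounds
        # are ambiguous, hence the KeyError below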
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
        # monotonic indexes are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
        # selecting a non-unique label from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
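        # assigning an astype'd / converted block back through .iloc/.loc
        # should keep the converted dtypes rather than reverting to the
        # original object values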
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
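        # label-based setters ([], .loc) enlarge on a missing label, while the
        # positional .iloc/.iat raise IndexError instead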
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
        # these are coerced to float unavoidably (as it's a list-like to
        # begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
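        # in this version, .loc with a list that contains missing labels acts
        # like a reindex and fills the missing positions with NaN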
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow non-string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
# tm.assert_frame_equal(y,expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self):
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(
dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x=[1], y=[np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
df.ix["Hello Friend"] = df.ix[0]
self.assertIn("Hello Friend", df['A'].index)
self.assertIn("Hello Friend", df['B'].index)
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
self.assertIn("A+1", panel.ix[0].columns)
self.assertIn("A+1", panel.ix[1].columns)
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 1)
# correct setting
df.loc[(0, 0), 'z'] = 2
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 2)
# 10264
df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e'], index=range(5))
df['f'] = 0
df.f.values[3] = 1
# TODO(wesm): unused?
# y = df.iloc[np.arange(2, len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
expected.at[3, 'f'] = 2
tm.assert_frame_equal(df, expected)
expected = Series([0, 0, 0, 2, 0], name='f')
tm.assert_series_equal(df.f, expected)
def test_set_ix_out_of_bounds_axis_0(self):
df = pd.DataFrame(
randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_set_ix_out_of_bounds_axis_1(self):
df = pd.DataFrame(
randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_index_type_coercion(self):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but is actually not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
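# e.g. s = Series(range(5)); s.loc['0'] = 0 appends a new '0' label and leaves
# the index as object dtype ([0, 1, 2, 3, 4, '0']), as asserted below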
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
self.assertTrue(s.index.is_integer())
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(indexer(s2)[0.1] == 0)
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
for s in [Series(range(5), index=np.arange(5.))]:
self.assertTrue(s.index.is_floating())
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(idxr(s2)[0.1] == 0)
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_duplicate_ix_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_float_index_at_iat(self):
s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
self.assertEqual(s.at[el], item)
for i in range(len(s)):
self.assertEqual(s.iat[i], i + 1)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right):
# label, index, slice
r, i, s = list('bcd'), [1, 2, 3], slice(1, 4)
c, j, l = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[r, c] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.iloc[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[s, l] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[r, c] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = pd.DataFrame(xs, columns=cols, index=list('abcde'))
# right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
if not idx.is_integer():
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
df2 = df.ix[[], :]
self.assertEqual(df2.loc[:, 'a'].dtype, np.int64)
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = pd.Series(index=range(x))
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_non_reducing_slice(self):
df = pd.DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
pd.Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
self.assertTrue(isinstance(df.loc[tslice_], DataFrame))
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], pd.Series(['A']), np.array(['A'])]
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import pandas as pd
from frappe.utils.data import add_days, getdate, get_datetime, now_datetime
# Header mapping (ERPNext <> MVD)
hm = {
'mitglied_nr': 'mitglied_nr',
'mitglied_id': 'mitglied_id',
'status_c': 'status_c',
'sektion_id': 'sektion_id',
'zuzug_sektion': 'sektion_zq_id',
'mitgliedtyp_c': 'mitgliedtyp_c',
'mitglied_c': 'mitglied_c',
'wichtig': 'wichtig',
'eintritt': 'datum_eintritt',
'austritt': 'datum_austritt',
'wegzug': 'datum_wegzug',
'zuzug': 'datum_zuzug',
'kuendigung': 'datum_kuend_per',
'adresstyp_c': 'adresstyp_c',
'adress_id': 'adress_id',
'firma': 'firma',
'zusatz_firma': 'zusatz_firma',
'anrede_c': 'anrede_c',
'nachname_1': 'nachname_1',
'vorname_1': 'vorname_1',
'tel_p_1': 'tel_p_1',
'tel_m_1': 'tel_m_1',
'tel_g_1': 'tel_g_1',
'e_mail_1': 'e_mail_1',
'zusatz_adresse': 'zusatz_adresse',
'strasse': 'strasse',
'nummer': 'nummer',
'nummer_zu': 'nummer_zu',
'postfach': 'postfach',
'postfach_nummer': 'postfach_nummer',
'plz': 'plz',
'ort': 'ort',
'nachname_2': 'nachname_2',
'vorname_2': 'vorname_2',
'tel_p_2': 'tel_p_2',
'tel_m_2': 'tel_m_2',
'tel_g_2': 'tel_g_2',
'e_mail_2': 'e_mail_2',
'datum': 'datum',
'jahr': 'jahr',
'offen': 'offen',
'ref_nr_five_1': 'ref_nr_five_1',
'kz_1': 'kz_1',
'tkategorie_d': 'tkategorie_d',
'pers_name': 'pers_name',
'datum_von': 'datum_von',
'datum_bis': 'datum_bis',
'datum_erinnerung': 'datum_erinnerung',
'notiz_termin': 'notiz_termin',
'erledigt': 'erledigt',
'nkategorie_d': 'nkategorie_d',
'notiz': 'notiz',
'weitere_kontaktinfos': 'weitere_kontaktinfos',
'mkategorie_d': 'mkategorie_d',
'benutzer_name': 'benutzer_name',
'jahr_bez_mitgl': 'jahr_bez_mitgl',
'objekt_hausnummer': 'objekt_hausnummer',
'nummer_zu': 'nummer_zu',
'objekt_nummer_zu': 'objekt_nummer_zu',
'rg_nummer_zu': 'rg_nummer_zu',
'buchungen': 'buchungen',
'online_haftpflicht': 'online_haftpflicht',
'online_gutschrift': 'online_gutschrift',
'online_betrag': 'online_betrag',
'datum_online_verbucht': 'datum_online_verbucht',
'datum_online_gutschrift': 'datum_online_gutschrift',
'online_payment_method': 'online_payment_method',
'online_payment_id': 'online_payment_id'
}
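# e.g. hm['eintritt'] -> 'datum_eintritt': the helpers below address MVD CSV columns
# via their ERPNext-side field name (see check_postfach, which reads row[hm[value]])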
def read_csv(site_name, file_name, limit=False):
# display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if not migliedschaft_existiert(str(get_value(row, 'mitglied_id'))):
if get_value(row, 'adresstyp_c') == 'MITGL':
create_mitgliedschaft(row)
else:
frappe.log_error("{0}".format(row), 'Adresse != MITGL, aber ID noch nicht erfasst')
else:
update_mitgliedschaft(row)
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
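# Illustrative call only (site and file names are placeholders); an import helper
# like this is typically driven from a bench/Frappe console session, e.g.:
# read_csv('mysite.local', 'mvd_mitglieder_export.csv', limit=100)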
def create_mitgliedschaft(data):
try:
if get_value(data, 'vorname_2') or get_value(data, 'nachname_2'):
hat_solidarmitglied = 1
else:
hat_solidarmitglied = 0
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
kundentyp = 'Einzelperson'
if get_value(data, 'mitgliedtyp_c') == 'GESCH':
kundentyp = 'Unternehmen'
zuzug = get_formatted_datum(get_value(data, 'zuzug'))
if zuzug:
zuzug_von = get_sektion(get_value(data, 'zuzug_sektion'))
else:
zuzug_von = ''
new_mitgliedschaft = frappe.get_doc({
'doctype': 'MV Mitgliedschaft',
'mitglied_nr': str(get_value(data, 'mitglied_nr')).zfill(8),
'mitglied_id': str(get_value(data, 'mitglied_id')),
'status_c': get_status_c(get_value(data, 'status_c')),
'sektion_id': get_sektion(get_value(data, 'sektion_id')),
'mitgliedtyp_c': get_mitgliedtyp_c(get_value(data, 'mitgliedtyp_c')),
'mitglied_c': get_mitglied_c(get_value(data, 'mitglied_c')),
#'wichtig': get_value(data, 'wichtig'),
'eintritt': get_formatted_datum(get_value(data, 'eintritt')),
'austritt': get_formatted_datum(get_value(data, 'austritt')),
'wegzug': get_formatted_datum(get_value(data, 'wegzug')),
#'wegzug_zu': '', --> where does this info come from?
'zuzug': zuzug,
'zuzug_von': zuzug_von,
'kuendigung': get_formatted_datum(get_value(data, 'kuendigung')),
'kundentyp': kundentyp,
'firma': get_value(data, 'firma'),
'zusatz_firma': get_value(data, 'zusatz_firma'),
'anrede_c': get_anrede_c(get_value(data, 'anrede_c')),
'nachname_1': get_value(data, 'nachname_1'),
'vorname_1': get_value(data, 'vorname_1'),
'tel_p_1': str(get_value(data, 'tel_p_1')),
'tel_m_1': str(get_value(data, 'tel_m_1')),
'tel_g_1': str(get_value(data, 'tel_g_1')),
'e_mail_1': get_value(data, 'e_mail_1'),
'zusatz_adresse': get_value(data, 'zusatz_adresse'),
'strasse': strasse,
'objekt_strasse': strasse, # fallback
'objekt_ort': get_value(data, 'ort'), # fallback
'nummer': get_value(data, 'nummer'),
'nummer_zu': get_value(data, 'nummer_zu'),
'postfach': postfach,
'postfach_nummer': get_value(data, 'postfach_nummer'),
'plz': get_value(data, 'plz'),
'ort': get_value(data, 'ort'),
'hat_solidarmitglied': hat_solidarmitglied,
'nachname_2': get_value(data, 'nachname_2'),
'vorname_2': get_value(data, 'vorname_2'),
'tel_p_2': str(get_value(data, 'tel_p_2')),
#'tel_m_2': str(get_value(data, 'tel_m_2')),
'tel_g_2': str(get_value(data, 'tel_g_2')),
'e_mail_2': str(get_value(data, 'e_mail_2'))
})
new_mitgliedschaft.insert()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n---\n{1}".format(err, data), 'create_mitgliedschaft')
return
def update_mitgliedschaft(data):
try:
mitgliedschaft = frappe.get_doc("MV Mitgliedschaft", str(get_value(data, 'mitglied_id')))
if get_value(data, 'adresstyp_c') == 'MITGL':
# Mitglied (inkl. Soli)
if get_value(data, 'vorname_2') or get_value(data, 'nachname_2'):
hat_solidarmitglied = 1
else:
hat_solidarmitglied = 0
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
kundentyp = 'Einzelperson'
if get_value(data, 'mitgliedtyp_c') == 'GESCH':
kundentyp = 'Unternehmen'
zuzug = get_formatted_datum(get_value(data, 'zuzug'))
if zuzug:
zuzug_von = get_sektion(get_value(data, 'zuzug_sektion'))
else:
zuzug_von = ''
mitgliedschaft.mitglied_nr = str(get_value(data, 'mitglied_nr')).zfill(8)
mitgliedschaft.status_c = get_status_c(get_value(data, 'status_c'))
mitgliedschaft.sektion_id = get_sektion(get_value(data, 'sektion_id'))
mitgliedschaft.mitgliedtyp_c = get_mitgliedtyp_c(get_value(data, 'mitgliedtyp_c'))
mitgliedschaft.mitglied_c = get_mitglied_c(get_value(data, 'mitglied_c'))
#mitgliedschaft.wichtig = get_value(data, 'wichtig')
mitgliedschaft.eintritt = get_formatted_datum(get_value(data, 'eintritt'))
mitgliedschaft.austritt = get_formatted_datum(get_value(data, 'austritt'))
mitgliedschaft.wegzug = get_formatted_datum(get_value(data, 'wegzug'))
mitgliedschaft.zuzug = zuzug
#mitgliedschaft.wegzug_zu = '' --> where does this info come from?
mitgliedschaft.zuzug_von = zuzug_von
mitgliedschaft.kuendigung = get_formatted_datum(get_value(data, 'kuendigung'))
mitgliedschaft.kundentyp = kundentyp
mitgliedschaft.firma = get_value(data, 'firma')
mitgliedschaft.zusatz_firma = get_value(data, 'zusatz_firma')
mitgliedschaft.anrede_c = get_anrede_c(get_value(data, 'anrede_c'))
mitgliedschaft.nachname_1 = get_value(data, 'nachname_1')
mitgliedschaft.vorname_1 = get_value(data, 'vorname_1')
mitgliedschaft.tel_p_1 = str(get_value(data, 'tel_p_1'))
mitgliedschaft.tel_m_1 = str(get_value(data, 'tel_m_1'))
mitgliedschaft.tel_g_1 = str(get_value(data, 'tel_g_1'))
mitgliedschaft.e_mail_1 = get_value(data, 'e_mail_1')
mitgliedschaft.zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.strasse = strasse
mitgliedschaft.nummer = get_value(data, 'nummer')
mitgliedschaft.nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.postfach = postfach
mitgliedschaft.postfach_nummer = get_value(data, 'postfach_nummer')
mitgliedschaft.plz = get_value(data, 'plz')
mitgliedschaft.ort = get_value(data, 'ort')
mitgliedschaft.hat_solidarmitglied = hat_solidarmitglied
mitgliedschaft.nachname_2 = get_value(data, 'nachname_2')
mitgliedschaft.vorname_2 = get_value(data, 'vorname_2')
mitgliedschaft.tel_p_2 = str(get_value(data, 'tel_p_2'))
#mitgliedschaft.tel_m_2 = str(get_value(data, 'tel_m_2'))
mitgliedschaft.tel_g_2 = str(get_value(data, 'tel_g_2'))
mitgliedschaft.e_mail_2 = get_value(data, 'e_mail_2')
mitgliedschaft.adress_id_mitglied = get_value(data, 'adress_id')
elif get_value(data, 'adresstyp_c') == 'OBJEKT':
# Objekt Adresse
mitgliedschaft.objekt_zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.objekt_strasse = get_value(data, 'strasse') or 'Fehlende Angaben!'
mitgliedschaft.objekt_hausnummer = get_value(data, 'nummer')
mitgliedschaft.objekt_nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.objekt_plz = get_value(data, 'plz')
mitgliedschaft.objekt_ort = get_value(data, 'ort') or 'Fehlende Angaben!'
mitgliedschaft.adress_id_objekt = get_value(data, 'adress_id')
elif get_value(data, 'adresstyp_c') == 'RECHN':
# Rechnungs Adresse
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
mitgliedschaft.abweichende_rechnungsadresse = 1
mitgliedschaft.rg_zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.rg_strasse = strasse
mitgliedschaft.rg_nummer = get_value(data, 'nummer')
mitgliedschaft.rg_nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.rg_postfach = postfach
mitgliedschaft.rg_postfach_nummer = get_value(data, 'postfach_nummer')
mitgliedschaft.rg_plz = get_value(data, 'plz')
mitgliedschaft.rg_ort = get_value(data, 'ort')
mitgliedschaft.adress_id_rg = get_value(data, 'adress_id')
# else:
# TBD!
mitgliedschaft.save(ignore_permissions=True)
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n{1}".format(err, data), 'update_mitgliedschaft')
return
def get_sektion(id):
# Listing not exhaustive, needs checking!
if id == 25:
return 'MVD'
elif id == 4:
return 'Bern'
elif id == 8:
return 'Basel Stadt'
elif id == 14:
return 'Luzern'
elif id == 3:
return 'Aargau'
else:
return 'Sektions-ID unbekannt'
def get_status_c(status_c):
# Listing probably not exhaustive, needs checking!
if status_c == 'AREG':
return 'Mitglied'
elif status_c == 'MUTATI':
return 'Mutation'
elif status_c == 'AUSSCH':
return 'Ausschluss'
elif status_c == 'GESTOR':
return 'Gestorben'
elif status_c == 'KUNDIG':
return 'Kündigung'
elif status_c == 'WEGZUG':
return 'Wegzug'
elif status_c == 'ZUZUG':
return 'Zuzug'
else:
return 'Mitglied'
def get_mitgliedtyp_c(mitgliedtyp_c):
# TBD!!!!!!!!!!
if mitgliedtyp_c == 'PRIV':
return 'Privat'
else:
return 'Privat'
def get_mitglied_c(mitglied_c):
# TBD!!!!!!!!!!
if mitglied_c == 'MITGL':
return 'Mitglied'
else:
return 'Mitglied'
def get_anrede_c(anrede_c):
anrede_c = int(anrede_c)
if anrede_c == 1:
return 'Herr'
elif anrede_c == 2:
return 'Frau'
elif anrede_c == 3:
return 'Frau und Herr'
elif anrede_c == 4:
return 'Herr und Frau'
elif anrede_c == 5:
return 'Familie'
elif anrede_c == 7:
return 'Herren'
elif anrede_c == 8:
return 'Frauen'
else:
return ''
def get_formatted_datum(datum):
if datum:
datum_raw = datum.split(" ")[0]
if not datum_raw:
return ''
else:
return datum_raw.replace("/", "-")
else:
return ''
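# For example, get_formatted_datum('2019/05/01 00:00:00') returns '2019-05-01',
# while empty or missing values fall through to ''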
def check_postfach(row, value):
value = row[hm[value]]
if not pd.isnull(value):
import logging
import traceback
from collections import Counter
import pandas as pd
from morpher.jobs import MorpherJob
class Sample(MorpherJob):
def do_execute(self):
filename = self.get_input("filename")
target = self.get_input_variables("target")
sampling_method = self.get_input_variables("sampling_method")
df = pd.read_csv(filepath_or_buffer=filename)
import os
import threading
from multiprocessing import Process
import queue
import time
import datetime
import numpy as np
import cv2 as cv
import random
import logging
import imutils
#import scipy
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.fs as pfs
from . import CamAiMessage
from . import CamAiDetection
logger = logging.getLogger(__name__)
#logger.setLevel(logging.WARNING)
logger.setLevel(logging.ERROR)
#formatter = logging.Formatter('%(asctime)s:%(message)s')
#formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
formatter = logging.Formatter('%(asctime)s:%(name)s:%(funcName)s:%(lineno)d:%(levelname)s:%(message)s', datefmt='%Y-%m-%d %H:%M:%S')
#file_handler = logging.FileHandler('CamAiCameraWriter.errorlog')
#file_handler.setFormatter(formatter)
#file_handler.setLevel(logging.ERROR)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
#stream_handler.setLevel(logging.WARNING)
stream_handler.setLevel(logging.ERROR)
#logger.addHandler(file_handler)
logger.addHandler(stream_handler)
DEBUG = False
Q_Depth_Profiler_Interval = 300
Alarm_Cache_Size = 60 # Batch size used when events are detected
class CamAiCamera (object):
def __init__(
self,
handle,
# url,
queues=None,
detection=None,
camvars = None,
managervars = None
):
self.start_time = time.time()
# Camera Options
self.handle = handle
# Detection object for detection state dependant functions
self.detection = detection
if camvars and len(camvars) > 0 :
for key in camvars:
logger.debug(f"Adding camera attribute: {key}, with value: {camvars[key]}")
setattr(self, key, camvars[key])
self.camvars = camvars.copy()
if managervars and len(managervars) > 0 :
for key in managervars:
logger.debug(f"Adding manager attribute: {key}, with value: {managervars[key]}")
setattr(self, key, managervars[key])
self.managervars = managervars.copy()
if queues is not None:
self.reader_queue = queues['reader_queue']
self.writer_queue = queues['writer_queue']
self.detect_queue = queues['detect_queue']
self.response_queue = queues['response_queue']
self.oob_queue = queues['oob_queue']
self.notification_queue = queues['notification_queue']
else:
logger.warning(f"Camera {name}: Queues are not initialized yet")
if self.subdir is None:
self.mydir = os.path.join(self.basedir,'')
else:
self.mydir = os.path.join(self.subdir,'')
if not os.path.exists(self.mydir):
os.makedirs(self.mydir)
self.odb = None
@classmethod
def from_dict(cls, camera_handle, cameraconfig,
managerconfig, camera_queues, detection):
from .CamAiConfig import CAMVARS, MANAGERVARS
camvars = CAMVARS
# Update mandatory configuration variables:defaults with user provided values if any
for key in camvars:
try:
camvars[key] = cameraconfig[key]
except KeyError:
logger.warning(f"{camvars['name']}: key: {key} doesn't exist in the config file, going with default {camvars[key]}")
# Add variables from configuration that are optional, TODO: Might not
# need this, handy while experimenting new variables
for key in cameraconfig:
try:
camvars[key] = cameraconfig[key]
except KeyError:
logger.warning(f"{camvars['name']}: key: {key} doesn't exist in defaults , going with user provided value{cameraconfig[key]}")
managervars = MANAGERVARS
for key in managervars:
try:
managervars[key] = managerconfig[key]
except KeyError:
logger.warning(f"Manager {managervars['name']}: key: {key} doesn't exist in the config file, going with default {managervars[key]}")
for key in managerconfig:
try:
managervars[key] = managerconfig[key]
except KeyError:
logger.warning(f"Manager {managervars['name']}: key: {key} doesn't exist in the config file, going with default {managervars[key]}")
return CamAiCamera(
handle=camera_handle,
queues=camera_queues,
detection=detection,
camvars = camvars,
managervars = managervars
)
def get_handle(self):
return self.handle
@property
def rotation(self):
return self._rotation
@rotation.setter
def rotation(self, angle):
if angle >= 0 and angle <= 360:
self._rotation = angle
else:
self._rotation = 0
def start(self):
cname = "Observer: " + self.name
if self.multiprocessing_observer is True:
self.observer = Process(
target=self._observe_camera, args=([]), name=cname)
else:
self.observer = threading.Thread(
target=self._observe_camera, args=([]), name=cname)
self.observer.do_observe = True
logger.warning(f"{cname} : Starting")
self.start_time = time.time()
self.observer.start()
#self.start_video_player()
def stop(self):
cname = "Observer: " + self.name
if self.multiprocessing_observer is False:
self.observer.do_observe = False
logger.warning(f"{cname} : Stopping")
#self.stop_video_player()
def join(self, waittime=10):
cname = "Observer: " + self.name
self.observer.join(waittime)
logger.warning(f"{cname} : Joining")
def start_video_player(self):
cname = "Observer: " + self.name
try:
if True:
pass
#self.video_player = CamAiViewer.play_stream(self.url)
except:
logger.error(f"{cname} : vlc exception: maybe this camera does not support multiple streams")
def stop_video_player(self):
cname = "Observer: " + self.name
try:
if self.video_player:
#logger.error(f"{cname} : starting video player with url: {self.url}")
self.video_player.stop()
except:
logger.error(f"{cname} : vlc exception: issue stopping video player")
def get_object_timeseries(self):
name = "Observer: " + self.name
parquet_file = os.path.join(self.mydir, self.name + '_events.parquet')
if os.path.isfile(parquet_file):
dbtable = pq.read_table(parquet_file)
odb = dbtable.to_pandas()
if odb is None:
logger.error(f"{name} : odb is None after loading {parquet_file}")
else:
odb['object_name'].astype('str', copy=False)
odb['confidence'].astype('float64', copy=False)
odb['boundingbox_x1'].astype('int64', copy=False)
odb['boundingbox_y1'].astype('int64', copy=False)
odb['boundingbox_x2'].astype('int64', copy=False)
odb['boundingbox_y2'].astype('int64', copy=False)
logger.debug(f"{name} : odb types: {odb.dtypes}")
else:
# First time, so create the db dataframe
col_names = ['detect_time',
'object_name',
'confidence',
'boundingbox_x1',
'boundingbox_y1',
'boundingbox_x2',
'boundingbox_y2'
]
odb = pd.DataFrame(columns=col_names)
odb['detect_time'] = pd.to_datetime(odb['detect_time'])
import torch
import logging
import pickle5 as pickle
class CallbackBase:
def __init__(self, verbose=True) -> None:
super().__init__()
self.algorithm = None
self.callbacks = None
self.logger = logging.getLogger(self.__class__.__name__)
self.summary_writer = None
self.verbose = verbose
self.agent = None
def _begin_fit(self, agent, callbacks, summary_writer=None, **kwargs):
self.algorithm = agent.model
self.agent = agent
self.callbacks = callbacks
self.summary_writer = summary_writer
def _after_fit(self, **kwargs):
pass
def _begin_next(self, **kwargs):
pass
def _after_next(self, **kwargs):
pass
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "serif"
import numpy as np
from itertools import combinations
import time
import re
class NonDominatedProgress(CallbackBase):
def __init__(self,
plot_freq=1,
plot_pf=True,
labels=None,
**kwargs):
super().__init__(**kwargs)
self.plot_freq = plot_freq
self.labels = labels
self.plot_info = None
self.path = None
self.plot_pf = plot_pf
def _begin_fit(self, **kwargs):
super()._begin_fit(**kwargs)
n_obj = self.algorithm.problem.n_obj
if not self.labels:
self.labels = [r'$f_{}(x)$'.format((i+1)) for i in range(n_obj)]
n_obj = list(combinations(range(n_obj), r=2))
ax_labels = list(combinations(self.labels, r=2))
pf = self.algorithm.problem.pareto_front()
if pf is None:
pf = [None] * len(ax_labels)
else:
pf = list(combinations(pf.T.tolist(), r=2))
pf = [None] * len(ax_labels)
self.plot_info = [n_obj, ax_labels, pf]
# self.path = os.path.join(
# self.agent.cfg.gif_dir,
# '[{}][{}][{}-{}]-G{:0>3d}.jpg'.format(
# self.algorithm.__class__.__name__,
# self.algorithm.__class__.__name__,
# self.algorithm.n_gen
# )
# )
def _begin_next(self, **kwargs):
if self.algorithm.n_gen is not None and self.algorithm.n_gen % self.plot_freq == 0:
f_pop = self.algorithm.pop.get('F'); f_opt = self.algorithm.opt.get('F')
for i, (obj_pair, labels, data) in enumerate(zip(*self.plot_info)):
fig = self.__plot_figure(f_pop, f_opt, obj_pair, labels, data)
if self.summary_writer:
self.summary_writer.add_figure(
tag='fig/' + re.sub(r'(?u)[^-\w.]', '', '{}-{}'.format(*labels)),
figure=fig,
global_step=self.algorithm.n_gen
)
self.summary_writer.close()
fig.savefig(
os.path.join(
self.agent.cfg.gif_dir,
re.sub(r'(?u)[^-\w.]', '', '[{}-{}]-G{:0>3d}.jpg'.format(
*labels,
self.algorithm.n_gen
))
)
)
if self.verbose:
plt.show()
plt.close(fig)
def __plot_figure(self,
f_pop,
f_opt,
obj_pair,
labels,
data):
fig, ax = plt.subplots()
ax.set_xlabel(labels[0]); ax.set_ylabel(labels[1])
if data:
ax.plot(*data, label='pareto front', color='red')
X = f_pop[:, obj_pair[0]]; Y = f_pop[:, obj_pair[1]]
X_opt = f_opt[:, obj_pair[0]]; Y_opt = f_opt[:, obj_pair[1]]
# lim = ax.get_xlim(), ax.get_ylim()
ax.scatter(X, Y, marker='o', color='green', facecolors='none', label='gen: {}'.format(self.algorithm.n_gen))
ax.plot(X_opt[np.argsort(X_opt)], Y_opt[np.argsort(X_opt)], 'g--')
ax.legend(loc='best')
ax.set_title('Objective Space')
ax.grid(True, linestyle='--')
fig.tight_layout()
return fig
from pymoo.indicators.igd import IGD
from pymoo.indicators.hv import Hypervolume
# from pymoo.performance_indicator.igd import IGD
# from pymoo.performance_indicator.hv import Hypervolume
import pandas as pd
class PerformanceMonitor(CallbackBase):
def __init__(self,
metric,
normalize=True,
from_archive=False,
convert_to_pf_space=False,
topk=1,
**kwargs) -> None:
super().__init__(**kwargs)
self.convert_to_pf_space = convert_to_pf_space
self.monitor = None
self.metric = metric
self.normalize = normalize
self.from_archive = from_archive
self.topk = topk
self.top_lst = []
self.current_score = None
self.current_time = 0
self.current_gen = 0
self.data = []
def __repr__(self) -> str:
info = {
'metric': self.metric,
'current_val': self.current_score,
'top_k': self.top_lst
}
return str(info)
def _after_next(self, F, reverse, **kwargs):
score = self.monitor.do(F)
# score = self.monitor.calc(F)
self.current_score = score
self.top_lst += [score]
self.top_lst = list(set(self.top_lst))
self.top_lst = sorted(self.top_lst, reverse=reverse)
if len(self.top_lst) > self.topk:
self.top_lst = self.top_lst[:self.topk]
if self.verbose:
if score in self.top_lst:
msg = \
'{}={:.3f} (best={:.3f})'.format(
self.metric.lower(),
score,
self.top_lst[0]
)
else:
msg = \
'{} was not in top {}'.format(
self.metric.lower(),
self.topk
)
if self.algorithm.n_gen == 1:
self.current_time = 0
elif self.current_gen != self.algorithm.n_gen:
self.current_gen = self.algorithm.n_gen
try:
self.current_time += sum(self.algorithm.problem.history['runtime'][self.current_gen-1])
except:
pass
if self.summary_writer:
self.summary_writer.add_scalar(
tag='metric/{}'.format(self.metric),
scalar_value=score,
global_step=self.algorithm.evaluator.n_eval,
walltime=self.current_time
)
self.summary_writer.close()
self.data += [[self.current_time, self.algorithm.n_gen, self.algorithm.evaluator.n_eval, score]]
df = pd.DataFrame(self.data, columns=['walltime', 'n_gen', 'n_eval', self.metric])
import pandas as pd
from bids import BIDSLayout
from .roi import extract_timecourse_from_nii
def get_fmriprep_timeseries(fmriprep_folder,
sourcedata_folder,
atlas,
atlas_type=None,
low_pass=None,
high_pass=1./128,
confounds_to_include=None,
*args,
**kwargs):
"""
Extract time series for each subject, task and run in a preprocessed
dataset in BIDS format, given all the ROIs in `atlas`.
Currently only `fmriprep` outputs are supported. The `sourcedata_folder`
is necessary to look up the TRs of the functional runs.
Parameters
----------
fmriprep_folder: string
Path to the folder that contains fmriprep'ed functional MRI data.
sourcedata_folder: string
Path to BIDS folder that has been used as input for fmriprep
atlas: sklearn.datasets.base.Bunch
This Bunch should contain at least a `maps`-attribute
containing a label (3D) or probabilistic atlas (4D),
as well as an `label` attribute, with one label for
every ROI in the atlas.
The function automatically detects which of the two is
provided. It extracts a (weighted) time course per ROI.
In the case of the probabilistic atlas, the voxels are
weighted by their probability (see also the Mappers in
nilearn).
atlas_type: str, optional
Can be 'labels' or 'probabilistic'. A label atlas
should be 3D and contains one unique number per ROI.
A Probabilistic atlas contains as many volume as
ROIs.
Usually, `atlas_type` can be detected automatically.
low_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
high_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
confounds_to_include: list of strings
List of confounds that should be regressed out.
By default a limited list of confounds is regressed out:
Namely, FramewiseDisplacement, aCompCor00, aCompCor01, aCompCor02,
aCompCor03, aCompCor04, aCompCor05, X, Y, Z, RotX, RotY, and RotZ
Examples
--------
>>> source_data = '/data/ds001/sourcedata'
>>> fmriprep_data = '/data/ds001/derivatives/fmriprep'
>>> from nilearn import datasets
>>> atlas = datasets.fetch_atlas_pauli_2017()
>>> from nideconv.utils.roi import get_fmriprep_timeseries
>>> ts = get_fmriprep_timeseries(fmriprep_data,
source_data,
atlas)
>>> ts.head()
roi Pu Ca
subject task time
001 stroop 0.0 -0.023651 -0.000767
1.5 -0.362429 -0.012455
3.0 0.087955 -0.062127
4.5 -0.099711 0.146744
6.0 -0.443499 0.093190
"""
if confounds_to_include is None:
confounds_to_include = ['FramewiseDisplacement', 'aCompCor00',
'aCompCor01', 'aCompCor02', 'aCompCor03',
'aCompCor04', 'aCompCor05', 'X', 'Y', 'Z',
'RotX', 'RotY', 'RotZ']
index_keys = []
timecourses = []
for func, confounds, meta in _get_func_and_confounds(fmriprep_folder,
sourcedata_folder):
print("Extracting signal from {}...".format(func.filename))
confounds = pd.read_table(confounds.filename).fillna(method='bfill')
tc = extract_timecourse_from_nii(atlas,
func.filename,
t_r=meta['RepetitionTime'],
atlas_type=atlas_type,
low_pass=low_pass,
high_pass=high_pass,
confounds=confounds[confounds_to_include].values)
for key in ['subject', 'task', 'run', 'session']:
if hasattr(func, key):
tc[key] = getattr(func, key)
if key not in index_keys:
index_keys.append(key)
timecourses.append(tc)
timecourses = pd.concat(timecourses)
timecourses = timecourses.set_index(index_keys, append=True)
timecourses = timecourses.reorder_levels(index_keys + ['time'])
return timecourses
def get_bids_onsets(bids_folder):
"""
Get event onsets from a BIDS folder in a nideconv-ready
format.
Parameters
----------
bids_folder: str
Folder containing fMRI dataset according to BIDS-standard.
Returns
-------
onsets: DataFrame
Dataframe containing onsets, with subject and potentially
session, task and run as indices.
"""
layout = BIDSLayout(bids_folder)
events = layout.get(type='events', extensions='tsv')
onsets = []
index_keys = []
for event in events:
onsets_ = pd.read_table(event.filename)
for key in ['subject', 'run', 'task', 'session']:
if hasattr(event, key):
onsets_[key] = getattr(event, key)
if key not in index_keys:
index_keys.append(key)
onsets.append(onsets_)
onsets = pd.concat(onsets)
#!/bin/python
from baseball_scraper import baseball_reference, espn, fangraphs
from baseball_id import Lookup
from yahoo_fantasy_bot import utils, source
import pandas as pd
import numpy as np
import datetime
import logging
logger = logging.getLogger()
class Builder:
"""Class that constructs prediction datasets for hitters and pitchers.
The datasets it generates are fully populated with projected stats. The
projection stats are scraped from fangraphs.com.
:param lg: Yahoo! league
:type lg: yahoo_fantasy_api.league.League
:param cfg: config details
:type cfg: ConfigParser
:param csv_details: Details about projections, stored in csv format
:type csv_details: dict
:param ts: Scraper to use to pull team data from baseball_reference.com
:type ts: baseball_reference.TeamScraper
:param es: Scraper to use to pull probable starters from espn
:type es: espn.ProbableStartersScraper
:param tss: Scraper to use to pull team list data from baseball_reference
:type tss: baseball_reference.TeamSummaryScraper
"""
def __init__(self, lg, cfg, csv_details, ts, es, tss):
hitters = source.read_csv(csv_details['hitters'])
pitchers = source.read_csv(csv_details['pitchers'])
self.ppool = pd.concat([hitters, pitchers], sort=True)
from minder_utils.models.feature_extractors import SimCLR, Partial_Order, AutoEncoder
from minder_utils.dataloader import process_data
from minder_utils.evaluate.evaluate_models import evaluate_features
from minder_utils.dataloader import Dataloader
from minder_utils.util.initial import first_run
import pandas as pd
import numpy as np
import os
os.chdir('..')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# County median income (Census SAIPE- source: https://www.census.gov/programs-surveys/saipe/data/datasets.All.html)
import pandas as pd
import numpy as np
import os
# 1989-2002
dats = os.listdir('Med_Inc_SAIPE/')[-6:] + os.listdir('Med_Inc_SAIPE/')[:3]
dats_df = pd.DataFrame()
years = [1989, 1993, 1995] + list(range(1997, 2003))
counter = 0
for file in dats:
df = pd.read_table('Med_Inc_SAIPE/' + file, sep='\s+', skiprows=1, usecols=[0,1,22,23,25], error_bad_lines=False)
df.columns = ['State FIPS', 'County FIPS', 'Median HH Income', 'County', 'State']
df['Year'] = np.full(len(df), years[counter])
dats_df = pd.concat([dats_df, df])
counter += 1
dats_df = dats_df.sort_values(['State FIPS', 'County FIPS'])
dats_df['State FIPS'] = dats_df['State FIPS'].astype(str).apply(lambda x:x.zfill(2))
dats_df['County FIPS'] = dats_df['County FIPS'].astype(str).apply(lambda x:x.zfill(3))
dats_df['FIPS'] = dats_df['State FIPS'] + dats_df['County FIPS']
# Changing Dade County (FL) code to updated Miami-Dade County code
dats_df.loc[dats_df.FIPS == '12025', 'FIPS'] = '12086'
dats_df.loc[dats_df.FIPS == '12086', 'County'] = 'Miami-Dade'
# Changing Skagway-Yakutat-Angoon County (AK) code to updated Skagway-Hoonah-Angoon County code
dats_df.loc[dats_df.FIPS == '02231', 'FIPS'] = '02232'
dats_df.loc[dats_df.FIPS == '02232', 'County'] = 'Skagway-Hoonah-Angoon'
dats_df['Median HH Income'] = dats_df['Median HH Income'].replace('.', np.nan).astype(float)
missing_fips89 = dats_df.FIPS[dats_df['Median HH Income'].isnull()].unique()
not_missingdf = dats_df[~((dats_df.FIPS.isin(missing_fips89)) & (dats_df.Year <= 1993))]
missingdf = dats_df[(dats_df.FIPS.isin(missing_fips89)) & (dats_df.Year <= 1993)]
missingdf['Median HH Income'] = missingdf['Median HH Income'].fillna(method='bfill')
dats_df = pd.concat([not_missingdf, missingdf]).sort_values(['FIPS', 'Year'])
# Adding rows for missing 1990-1992 records (assigning avg. of 1989 & 1993 incomes to 1991; then avg. of 89 & 91 to 90 and avg of 91 & 93 to 92)
dats91 = dats_df[dats_df.Year == 1993].copy()
dats91['Median HH Income'] = pd.Series()
dats91['Year'] = list(np.full(len(dats91), 1991))
dats_df = pd.concat([dats_df, dats91]).sort_values(['FIPS', 'Year'])
dats_df['Median HH Income'] = dats_df['Median HH Income'].replace('.', np.nan).astype(float)
# Assigning avg. of prev and following incomes
# Source: https://stackoverflow.com/questions/44032771/fill-cell-containing-nan-with-average-of-value-before-and-after
dats_df['Median HH Income'] = round(dats_df['Median HH Income'].fillna((dats_df['Median HH Income'].shift() + dats_df['Median HH Income'].shift(-1))/2), 0)
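# Toy illustration of the shift-averaging gap fill used above (made-up numbers):
# s = pd.Series([100.0, np.nan, 120.0])
# s.fillna((s.shift() + s.shift(-1)) / 2) -> [100.0, 110.0, 120.0]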
dats90 = dats_df[dats_df.Year == 1993].copy()
dats90['Median HH Income'] = pd.Series()
dats90['Year'] = list(np.full(len(dats90), 1990))
dats92 = dats_df[dats_df.Year == 1993].copy()
dats92['Median HH Income'] = pd.Series()
dats92['Year'] = list(np.full(len(dats92), 1992))
dats94 = dats_df[dats_df.Year == 1993].copy()
dats94['Median HH Income'] = pd.Series()
dats94['Year'] = list(np.full(len(dats94), 1994))
dats96 = dats_df[dats_df.Year == 1993].copy()
dats96['Median HH Income'] = pd.Series()
dats96['Year'] = list(np.full(len(dats96), 1996))
dats_df = pd.concat([dats_df, dats90])
dats_df = pd.concat([dats_df, dats92])
dats_df = pd.concat([dats_df, dats94])
dats_df = pd.concat([dats_df, dats96]).sort_values(['FIPS', 'Year'])
# These counties contain too many incomplete years (easier to remove altogether)
dats_df = dats_df[~dats_df.FIPS.isin(['15005', '51780'])]
# Assigning avg. of prev and following incomes to years missing data
dats_df['Median HH Income'] = round(dats_df['Median HH Income'].fillna((dats_df['Median HH Income'].shift() + dats_df['Median HH Income'].shift(-1))/2), 0)
dats_df = dats_df[dats_df['County FIPS'].astype(int) > 0]
dats_df['Med_Inc'] = dats_df['Median HH Income'].astype(int)
dats_df = dats_df[['Year', 'State', 'FIPS', 'Med_Inc']].reset_index(drop=True)
#------------------------------------------------------------------------------------------------------------------------#
# Replacing incorrect state abbrev & county names (some data was mixed up through extraction from .dat files)
# Source: https://www.census.gov/library/publications/2011/compendia/usa-counties-2011.html
counties = pd.read_excel('CLF01.xls', usecols=[0,1])[2:]
counties['STCOU'] = counties['STCOU'].astype(str).apply(lambda x:x.zfill(5))
areaname_dict = {}
for fips in counties['STCOU']:
area = counties.Areaname[counties.STCOU == fips].iloc[0]
areaname_dict[fips] = area
dats_df['Areaname'] = dats_df['FIPS'].map(areaname_dict)
dats_df.loc[dats_df.FIPS == '12086', 'Areaname'] = 'Miami-Dade, FL' # County FIPS changed in 1997
dats_df['State'] = dats_df['Areaname'].str.split(',', expand=True)[1].str.strip()
#------------------------------------------------------------------------------------------------------------------------#
# 2003-2018
excels_df = pd.DataFrame()
years = range(2003, 2019)
counter = 0
for file in os.listdir('Med_Inc_SAIPE')[3:-6]:
if years[counter] < 2005:
skiprows = 1
elif years[counter] < 2013:
skiprows = 2
else:
skiprows = 3
df = pd.read_excel('Med_Inc_SAIPE/' + file, skiprows=skiprows, usecols=[0,1,2,3,22])
df.columns = ['State FIPS', 'County FIPS', 'State', 'County', 'Median HH Income']
df['Year'] = np.full(len(df), years[counter])
excels_df = pd.concat([excels_df, df])
counter += 1
excels_df = excels_df[excels_df['County FIPS'] > 0]
excels_df['County FIPS'] = excels_df['County FIPS'].astype(int)
excels_df['State FIPS'] = excels_df['State FIPS'].astype(int).astype(str).apply(lambda x:x.zfill(2))
excels_df['County FIPS'] = excels_df['County FIPS'].astype(int).astype(str).apply(lambda x:x.zfill(3))
excels_df['FIPS'] = excels_df['State FIPS'] + excels_df['County FIPS']
excels_df = excels_df[excels_df.FIPS != '15005']
excels_df['Areaname'] = excels_df['FIPS'].map(areaname_dict)
excels_df['Med_Inc'] = excels_df['Median HH Income'].astype(int)
excels_df = excels_df[['Year', 'State', 'FIPS', 'Med_Inc', 'Areaname']].reset_index(drop=True)
#------------------------------------------------------------------------------------------------------------------------#
# Combining 89-02 w/ 03-18 income data
combined = pd.concat([dats_df, excels_df])
#!/usr/bin/python3
"""
Last update: April 2021
Author: <NAME>, PhD - The Scripps Research Institute, La Jolla (CA)
Contact info: <EMAIL>
GitHub project repository: https://github.com/ldascenzo/pytheas
***DESCRIPTION***
Pytheas visualization algorithm.
Additional information on the output files and the parameters can be found in the Matching&Scoring section of the
Pytheas manual
OPTIONS
--digest_file (required) -> Input file obtained from the Pytheas in silico digestion workflow
--mgf_file (required) -> Experimental measured peaks in mgf file format
--match_file (required) -> scored/matched peaks to visualize, output of the matching/scoring script
--highest_peaks (OPT, default = all) -> Number of most intense MS2 peaks to be shown in the output spectrum
--mz_min (OPT, default = auto) -> minimum value of m/z (x axis) to use for plotting
--mz_max (OPT, default = auto) -> maximum value of m/z (x axis) to use for plotting
--digest_spectra (OPT, default = n) -> choose (y/n) whether to plot the spectra for the theoretical digest
--Sp_cutoff (OPT, default = 0) -> Minimum Sp score cutoff for target/decoy sequences
--dSp_cutoff (OPT, default = 1) -> Maximum dSp score cutoff for target/decoy sequences
--visualize_decoys (OPT, default='n') -> Visualize (y/n) decoy sequences
--modified_spectra_only (OPT, default='n') -> Visualize (y/n) only spectra for targets/decoys containing nucleotide
modifications
--rank_max (OPT, default=99) -> Maximum rank of spectra to visualize (default=99[all])
--only_unique_positions (opt, default = n) -> Visualize (y/n) only target/decoy sequences mapping to unique sequence
positions
--remove_redundant_sequences_with_X (opt, default = n) -> Remove (y/n) redundant matches containing X, keeping only the
highest ranking (default = n)
--dSp2_minimum_cutoff (opt, default=0) -> Minimum dSp2 score cutoff for target/decoy sequences
--MS1_offset_cutoff (opt, default=0) -> Maximum MS1 matching offset (ppm) value for target/decoy sequences. It applies
to positive and negative values
***OUTPUT***
1) visualization_output -> html file containing a table with all the visualized targets/decoys and the spectra
"""
import argparse, subprocess, os, sys, re
import matplotlib
import ntpath
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pyteomics.mgf as pymgf
from datetime import datetime
import multiprocessing
import shutil
time = datetime.now()
# Initialize and define launch options
parser = argparse.ArgumentParser(description='List of available options')
parser.add_argument('--digest_file', required=True, help='Input file obtained from the Pytheas '
'in silico digestion workflow (required)')
parser.add_argument('--mgf_file', required=True, help='Experimental measured peaks in mgf file format (required)')
parser.add_argument('--match_file', required=True, help='File (txt) generated by the Pytheas matching '
'and scoring algorithm (match_output_[dataset])')
parser.add_argument('--highest_peaks', default='all',
help='Number of most intense MS2 peaks to be shown in the output spectrum (DEFAULT = all)')
parser.add_argument('--mz_min', default=-1, type=int, help='Minimum value to use for x axis on the spectra plotting '
'(default = auto)')
parser.add_argument('--mz_max', default=-1, type=int, help='Maximum value to use for x axis on the spectra plotting'
'(default = auto)')
parser.add_argument('--digest_spectra', default='n', choices=['y', 'n'],
help='Choose (y/n) if the digest spectra have to be visualized (DEFAULT=n)')
parser.add_argument('--Sp_cutoff', default=0, type=np.float64,
help='Minimum Sp score cutoff for target/decoy sequences (DEFAULT=0')
parser.add_argument('--dSp_cutoff', default=1, type=np.float64,
help='Maximum dSp score cutoff for target/decoy sequences (DEFAULT=1)')
parser.add_argument('--dSp2_minimum_cutoff', default=0, type=float, help='Minimum dSp2 score cutoff for target/decoy'
' sequences (default=0)')
parser.add_argument('--MS1_offset_cutoff', default=0, type=float, help='Maximum MS1 matching offset (ppm) value for '
'target/decoy sequences. It applies to positive '
'and negative values (default=0)')
parser.add_argument('--visualize_decoys', default='n', choices=['y', 'n'],
help='Visualize (y/n) decoy sequences (DEFAULT=n)')
parser.add_argument('--modified_spectra_only', default='n', choices=['y', 'n'],
help='Visualize only spectra for targets/decoys containing nucleotide modifications (default=n)')
parser.add_argument('--rank_max', default=99, type=int,
help='Maximum rank value for target/decoy sequences (default=99[all])')
parser.add_argument('--only_unique_positions', default='n', choices=['y', 'n'],
help='Visualize (y/n) only target/decoy sequences mapping to unique sequence positions')
parser.add_argument('--remove_redundant_sequences_with_X', default='n', choices=['y', 'n'],
help='Remove redundant matches containing X, keeping only the highest ranking (default = n)')
args = parser.parse_args()
params = {
'figure.dpi': 300,
'axes.labelsize': 9,
'font.size': 10,
'legend.fontsize': 8,
'legend.frameon': True,
'xtick.labelsize': 7,
'ytick.labelsize': 7,
'font.family': 'serif',
'axes.linewidth': 0.5,
'xtick.major.size': 4, # major tick size in points
'xtick.minor.size': 2, # minor tick size in points
'xtick.direction': 'out',
'ytick.major.size': 4, # major tick size in points
'ytick.minor.size': 2, # minor tick size in points
'ytick.direction': 'out',
}
plt.rcParams.update(params)
# Color code for MS2 ion series in the output spectra
color_MS2 = {'a': 'green', 'a-B': 'green', 'w': 'green', 'b': 'blue', 'x': 'blue', 'c': 'magenta', 'y': 'magenta',
'd': 'orange', 'z': 'orange'}
MS2_ion_series = ['a', 'b', 'c', 'd', 'w', 'x', 'y', 'z', 'a-B', 'y-P', 'z-P']
# Parameter used to exclude Ion + 1/2 Na from being a reference peak for scaling intensities
NA_mass, H_mass = 22.989769282, 1.007825032
def __round_number(x, base=25):
"""
Round a number to the closest integer multiple of `base` (default 25). Used to determine
axis limit values for the spectra plots
"""
return int(base * round(np.float64(x) / base))
def __conv_inch(length_mm):
"""
Converts a length from millimeters to inch
"""
return length_mm / 25.4
def __plot_limits(xy_dataframe, dic_key):
"""
Determines the limits for x and y axes, defined based on the min and max values of m/z (x) and intensity (y)
from the input file
"""
return __round_number(min(xy_dataframe[dic_key]['m/z'])) - 25, __round_number(
max(xy_dataframe[dic_key]['m/z'])) + 25, __round_number(max(xy_dataframe[dic_key]['intensity'])) + 50
def __annotation(row, ax, bars_width):
"""
Annotate the spectrum with ion series and a dashed line over the bar
"""
ax.annotate(row['ion'] + " " + str(round(row['m/z'], 3)), xy=(row['m/z'], 105), xycoords='data',
xytext=(row['m/z'] - bars_width * 4, 130), textcoords='data', rotation=90,
color=color_MS2[row['ion'][0]], size=5)
plt.axvline(np.float64(row['m/z']), ymax=100 / 150, color=color_MS2[row['ion'][0]], linewidth=bars_width,
linestyle='dashed')
def __create_directory(name):
"""
Create the directory in which to output the spectra
"""
if os.path.exists("./{}/".format(name)):
shutil.rmtree("./{}".format(name))
os.mkdir("./{}".format(name))
def __digest_table(p, dic):
"""
Generates a dataframe with all the info to be outputed as a final table with all matching ions
"""
out_dic = {}
for precursor in p:
seq_length, d = len(dic[precursor][6]), {}
# Creates an empty dictionary for all the MS2 ion series
for ion in dic[precursor][12:]:
m = re.search(r'\d+$', ion.split("(")[0].split('-')[0])
if m:
ion_series = ion.split("(")[0].replace(m.group(), '') + "(" + ion.split(':')[0].split("(")[1]
if ion_series not in d.keys():
d[ion_series] = [''] * seq_length
else:
ion_series = ion.split(':')[0]
if ion_series not in d.keys():
d[ion_series] = [''] * seq_length
# Fills the dictionary with m/z values on
for ion in dic[precursor][12:]:
m = re.search(r'\d+$', ion.split("(")[0].split('-')[0])
if m:
ion_series = ion.split("(")[0].replace(m.group(), '') + "(" + ion.split(':')[0].split("(")[1]
if ion[0] == 'a' or ion[0] == 'b' or ion[0] == 'c' or ion[0] == 'd':
d[ion_series][int(m.group()) - 1] = str(round(np.float64(ion.split(':')[1]), 4))
else:
d[ion_series][seq_length - int(m.group())] = str(round(np.float64(ion.split(':')[1]), 4))
else:
ion_series = ion.split(':')[0]
# Add a control to account if some of the ion series names are used as modified bases
if ion_series.split("(")[0] in MS2_ion_series:
ion_series = "B" + ion_series
d[ion_series] = str(round(np.float64(ion.split(':')[1]), 4))
# Convert the dictionary with all theoretical MS2 ions into a dataframe
df = pd.DataFrame(data=d)
out_dic[precursor] = df
return out_dic
def digest_peaks(input_file):
"""
Creates a dictionary with the info on m/z:intensities from the digest file input
Args:
input_file (text file): the digest input file
Returns:
peaks (dictionary with peaks stored in pandas dataframes)
"""
######NOTE!!!!
# The first part of this function is necessary to generate the output txt file for the digest peaks,
# to rework for final version
###########
dic, outlist = {}, []
# Iterates within the lines of the digest file
for line in open(input_file, 'r'):
# Only lines starting with a number are considered (ones with precursor ions)
if line[0].isdigit():
# A dictionary key is created with { M_sequence : all the info }
dic[line.split()[0] + "_" + line.split()[7]] = line.split()[1:]
# Order the precursor ions by crescent m/z value
precursors = sorted(dic.keys(), key=lambda x: np.float64(x.split('_')[0]))
# Creates a dictionary with dataframes containing all the ions from the digest
digest_df = __digest_table(precursors, dic)
# First line of the output file carries the info on the precursor ion
outlist.append(args.digest_file + "\n\n")
# Preparing the lines for the final output, with header and m/z intensity ion
for ion in precursors:
outlist.append(
"BEGIN IONS\nprecursor= {} seq= {} mod= {} charge= {}\n".format(ion.split('_')[0], ion.split('_')[1],
dic[ion][7], dic[ion][4]))
MS2_ions = sorted(dic[ion][12:], key=lambda x: np.float64(x.split(':')[-1]))
for i in MS2_ions:
outlist.append(str(round(np.float64(i.split(':')[-1]), 6)) + "\t100\t" + i.split(':')[0] + "\n")
outlist.append("END IONS\n")
flag, out_dic = 0, {}
for line in outlist:
if "precursor" in line:
flag, prec_mass, seq, charge = 1, line.split('=')[1].split()[0], line.split('=')[2].split()[0], \
line.split('=')[4].split()[0]
out_dic[prec_mass + "_" + seq + "_" + charge] = {'m/z': [], 'intensity': [], 'ion': []}
if "END IONS" in line:
flag = 0
if flag == 1 and line[0].isdigit():
(out_dic[prec_mass + "_" + seq + "_" + charge]['m/z'].append(np.float64(line.split("\t")[0])),
out_dic[prec_mass + "_" + seq + "_" + charge]['intensity'].append(np.float64(line.split("\t")[1])),
out_dic[prec_mass + "_" + seq + "_" + charge]['ion'].append(line.split("\t")[2][:-1]))
# Transform the input data in pandas dataframes
for key in out_dic.keys():
df = | pd.DataFrame(out_dic[key]) | pandas.DataFrame |
""" Librairie personnelle pour exécuter des tests de normalité,
homostéradiscité, ANOVA, Kruskall-Wallis
"""
#! /usr/bin/env python3
# coding: utf-8
# ====================================================================
# Outil visualisation - projet 3 Openclassrooms
# Version : 0.0.0 - CRE LR 13/03/2021
# ====================================================================
from scipy.stats import chi2_contingency
from scipy.stats import chi2
from scipy.stats import shapiro, normaltest, anderson
import pandas as pd
from IPython.display import display
# --------------------------------------------------------------------
# -- VERSION
# --------------------------------------------------------------------
__version__ = '0.0.0'
# --------------------------------------------------------------------
# -- TESTS DE NORMALITE
# --------------------------------------------------------------------
def test_normalite(data):
"""
Test de la normalité d'une distribution.
Parameters
----------
data : dataframe ou dataframe restreint (une seule variable) obligatoire
Returns
-------
None.
"""
# H0 : la distribution des données est normale (P>0,05)
# H1 : la distribution des données n'est pas normale (P<0,05)
df_resultat = | pd.DataFrame([]) | pandas.DataFrame |
from tenbagger.src.scripts.insiders import format_insiders
from tenbagger.src.utils.utilities import read_yaml
import pandas as pd
import subprocess
class NotifyInsider:
def __init__(self):
self.env = read_yaml('user_data/env/environment.yaml')
self.today = | pd.Timestamp.today() | pandas.Timestamp.today |
#!/usr/bin/env python
# coding: utf-8
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# # Logistic Regression with Python
#
# For this lecture we will be working with the [Titanic Data Set from Kaggle](https://www.kaggle.com/c/titanic). This is a very famous data set and very often is a student's first step in machine learning!
#
# We'll be trying to predict a classification- survival or deceased.
# Let's begin our understanding of implementing Logistic Regression in Python for classification.
#
# We'll use a "semi-cleaned" version of the titanic data set, if you use the data set hosted directly on Kaggle, you may need to do some additional cleaning not shown in this lecture notebook.
#
# ## Import Libraries
# Let's import some libraries to get started!
# In[13]:
import pandas as pd
import numpy as np
import datetime
import re
#import mathdata
# import matplotlib.pyplot as plt
import seaborn as sns
import os
# ## The Data
#
# Let's start by reading in the titanic_train.csv file into a pandas dataframe.
# In[14]:
#train_1958
path = input('Please type in the path of your data folder:')
# In[16]:
# this function reads a csv file and process it by
# 1. removing the trash
# 2. get date into the same format
# 3. get time into the same format
# 4. fix the wind speed (change into string)
# input: filename str ---eg.'2011-2018ord.csv'
# output: pandas dataframe
def readfile(filename):
# performing task 1
trash_offset = 25
trash_index = 0
train = pd.read_csv(path+'/'+filename, skiprows= range(0,8) )
train = train.loc[:, ~train.columns.str.contains('^Unnamed')]
nrows = train.shape[0]
#print(nrows)
for x in range(nrows-trash_offset,nrows):
if type(train.loc[x]['Time']) != str:
trash_index = x
break
train.drop(range(trash_index,nrows), inplace = True)
# performing task 2
# check if the date data is in the right form
date_pattern = re.compile(r'\d\d\d\d-\d\d-\d\d')
searchObj = re.search(date_pattern, train['Date'][0])
if not searchObj:
nrows = train.shape[0]
for x in range(0,nrows):
train.at[x,'Date'] = datetime.datetime.strptime(train.at[x,'Date'], "%m/%d/%Y").strftime("%Y-%m-%d")
# performing task 3
# check if time data is in the right form
time_pattern = re.compile(r'^\d:\d\d')
searchObj = re.search(time_pattern, train['Time'][0])
if searchObj:
nrows = train.shape[0]
for x in range(0,nrows):
# task 3
searchObj = re.search(time_pattern, train['Time'][x])
if searchObj:
train.at[x,'Time'] = '0' + train.at[x,'Time']
# performing task 4
train = train.astype({train.columns[4]:'str'})
return train
# In[17]:
#train_test = readfile('1991-2000ord.csv')
#train_temp = readfile('1971-1980ord.csv')
#train_test
# In[18]:
# this function takes in a date and calculate the mean min max for the features
# input: date -- string in the form of 'yyyy-mm-dd' eg:'1958-11-01'
# train -- the main datafram to analyze
# output-- list containing:
# mean_result -- datafram for mean of all the features
# min_result -- datafram of min of all the features
# max_result -- datafram of max of all the features
# invalid_feature -- 0 list of size 8, to be replaced with 1 to indicate invalid feature
def analyze_by_day(date, train):
#test = '1958-11-01'
train_found = train[train['Date'] == date]
#print(train_found)
invalid_feature = np.zeros(8)
#train_found.shape[0]
# out of the 8 features
for y in range(2,train_found.shape[1]):
# calculate how many 'm' there are for each feature out of 24 days
m_count = 0
for x in range(0, train_found.shape[0]):
# count the number of 'm'
if train_found.iloc[x,y].lower() == 'm':
m_count += 1
# if there are total of 6 or more 'm' make this feature invalid
if m_count >= 6:
invalid_feature[y-2] = 1
m_count = 0
#print(invalid_feature)
# now we have which feature is invalid, calculate mean etc for each feature
df2 = train_found.drop(columns =['Date','Time'])
df1 = df2.apply(pd.to_numeric, errors='coerce')
for x in range(0,8):
df1[df1.columns[x]].fillna(value=df1[df1.columns[x]].mean(), inplace = True)
mean_result = df1.mean()
min_result = df1.min()
max_result = df1.max()
#print(invalid_feature)
#print(mean_result)
#based on the invalid array, assign the final result list
for x in range(0,8):
if invalid_feature[x] == 1:
mean_result[x] = float('nan')
min_result[x] = float('nan')
max_result[x] = float('nan')
return mean_result,min_result,max_result,invalid_feature
# In[19]:
#mean_result = pd.DataFrame()
#max_result = pd.DataFrame()
#min_result = pd.DataFrame()
#invalid_feature = np.zeros(8)
#temp = analyze_by_day('1958-11-01', train_temp)
# In[20]:
# read all the csv files
listOfFiles = os.listdir(path)
print(listOfFiles)
file_pattern = re.compile(r'ord.csv')
train_temp = pd.DataFrame()
for x in range(0,len(listOfFiles)):
searchObj = re.search(file_pattern, listOfFiles[x])
if searchObj:
print (listOfFiles[x])
train_temp = pd.concat([train_temp,readfile(listOfFiles[x])], axis = 0, ignore_index=True)
# In[21]:
# now that we have read all the files ask user to input a range
first_date = input("Please input the starting date as in yyyy-mm-dd: ")
d1 = datetime.datetime.strptime(first_date, "%Y-%m-%d").date()
#print(d1)
second_date = input("Please input the ending date as in yyyy-mm-dd: ")
d2 = datetime.datetime.strptime(second_date, "%Y-%m-%d").date()
delta = d2-d1
while delta.days <= 0:
first_date = input("Please input a valid starting date as in yyyy-mm-dd: ")
d1 = datetime.datetime.strptime(first_date, "%Y-%m-%d").date()
second_date = input("Please input a valid ending date as in yyyy-mm-dd: ")
d2 = datetime.datetime.strptime(second_date, "%Y-%m-%d").date()
delta = d2-d1
mean_temp = []
min_temp =[]
max_temp = []
invalid_temp = []
if delta.days >0:
for i in range(delta.days+1):
temp_day = d1+datetime.timedelta(days=i)
day_str = temp_day.strftime('%Y-%m-%d')
temp = analyze_by_day(day_str, train_temp)
mean_temp.append(temp[0])
min_temp.append(temp[1])
max_temp.append(temp[2])
invalid_temp.append(temp[3])
# group them together
mean_df = pd.DataFrame(mean_temp)
min_df = | pd.DataFrame(min_temp) | pandas.DataFrame |
import pandas as pd
import pytest
from woodwork import init_series
from woodwork.logical_types import (
Age,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
Double,
Integer,
IntegerNullable,
)
from evalml.pipelines.components import ReplaceNullableTypes
@pytest.fixture
def nullable_data():
return pd.DataFrame(
{
"non_nullable_integer": [0, 1, 2, 3, 4],
"nullable_integer": [0, 1, 2, 3, None],
"non_nullable_age": [20, 21, 22, 23, 24],
"nullable_age": [20, None, 22, 23, None],
"non_nullable_boolean": [True, False, True, False, True],
"nullable_boolean": [None, True, False, True, False],
}
)
@pytest.mark.parametrize("methods_to_test", ["fit and transform", "fit_transform"])
@pytest.mark.parametrize("input_type", ["ww", "pandas"])
def test_replace_nullable_types(nullable_data, input_type, methods_to_test):
X = nullable_data
nullable_types_replacer = ReplaceNullableTypes()
X = X.astype(
{
"nullable_integer": "Int64",
"nullable_age": "Int64",
"nullable_boolean": "boolean",
}
)
assert str(X.dtypes.loc["non_nullable_integer"]) == "int64"
assert str(X.dtypes.loc["nullable_integer"]) == "Int64"
assert str(X.dtypes.loc["non_nullable_age"]) == "int64"
assert str(X.dtypes.loc["nullable_age"]) == "Int64"
assert str(X.dtypes.loc["non_nullable_boolean"]) == "bool"
assert str(X.dtypes.loc["nullable_boolean"]) == "boolean"
if input_type == "ww":
X.ww.init(logical_types={"nullable_age": AgeNullable, "non_nullable_age": Age})
assert isinstance(X.ww.logical_types["nullable_integer"], IntegerNullable)
assert isinstance(X.ww.logical_types["nullable_age"], AgeNullable)
assert isinstance(X.ww.logical_types["nullable_boolean"], BooleanNullable)
if methods_to_test == "fit and transform":
nullable_types_replacer.fit(X)
assert set(nullable_types_replacer._nullable_int_cols) == {
"nullable_integer",
"nullable_age",
}
assert nullable_types_replacer._nullable_bool_cols == ["nullable_boolean"]
X_t, y_t = nullable_types_replacer.transform(X)
assert set(X_t.columns) == set(X.columns)
assert X_t.shape == X.shape
elif methods_to_test == "fit_transform":
X_t, y_t = nullable_types_replacer.fit_transform(X)
assert set(nullable_types_replacer._nullable_int_cols) == {
"nullable_integer",
"nullable_age",
}
assert nullable_types_replacer._nullable_bool_cols == ["nullable_boolean"]
assert set(X_t.columns) == set(X.columns)
assert X_t.shape == X.shape
# Check the pandas dtypes
assert str(X_t.dtypes.loc["non_nullable_integer"]) == "int64"
assert str(X_t.dtypes.loc["nullable_integer"]) == "float64"
assert str(X_t.dtypes.loc["non_nullable_age"]) == "int64"
assert str(X_t.dtypes.loc["nullable_age"]) == "float64"
assert str(X_t.dtypes.loc["non_nullable_boolean"]) == "bool"
assert str(X_t.dtypes.loc["nullable_boolean"]) == "category"
# Check the Woodwork dtypes
assert isinstance(X_t.ww.logical_types["non_nullable_integer"], Integer)
assert isinstance(X_t.ww.logical_types["nullable_integer"], Double)
if input_type == "ww":
assert isinstance(X_t.ww.logical_types["non_nullable_age"], Age)
else:
assert isinstance(X_t.ww.logical_types["non_nullable_age"], Integer)
assert isinstance(X_t.ww.logical_types["nullable_age"], Double)
assert isinstance(X_t.ww.logical_types["non_nullable_boolean"], Boolean)
assert isinstance(X_t.ww.logical_types["nullable_boolean"], Categorical)
@pytest.mark.parametrize("input_type", ["ww", "pandas"])
def test_replace_nullable_types_boolean_target(nullable_data, input_type):
nullable_types_replacer = ReplaceNullableTypes()
# Get input data
X = nullable_data
X = X.astype({"nullable_integer": "Int64", "nullable_boolean": "boolean"})
y = pd.Series([True, False, None, True, False])
y = y.astype("boolean")
if input_type == "ww":
y = init_series(y)
assert isinstance(y.ww.logical_type, BooleanNullable)
nullable_types_replacer.fit(X, y)
assert nullable_types_replacer._nullable_target == "nullable_bool"
X_t, y_t = nullable_types_replacer.transform(X, y)
assert str(y_t.dtypes) == "category"
assert isinstance(y_t.ww.logical_type, Categorical)
@pytest.mark.parametrize("input_type", ["ww", "pandas"])
def test_replace_nullable_types_integer_target(nullable_data, input_type):
nullable_types_replacer = ReplaceNullableTypes()
# Get input data
X = nullable_data
X = X.astype({"nullable_integer": "Int64", "nullable_boolean": "boolean"})
y = | pd.Series([0, 1, None, 3, 4]) | pandas.Series |
import asyncio
from datetime import datetime, timedelta, timezone
import logging
import pickle
from typing import Any, Collection, Dict, Iterable, List, Optional, Tuple, Union
import aiomcache
import numpy as np
import pandas as pd
import sentry_sdk
from sqlalchemy import and_, func, join, or_, select, union_all
from sqlalchemy.sql.elements import BinaryExpression
from athenian.api import metadata
from athenian.api.async_utils import gather, postprocess_datetime, read_sql_query
from athenian.api.cache import cached, max_exptime, middle_term_exptime
from athenian.api.controllers.logical_repos import coerce_logical_repos, contains_logical_repos, \
drop_logical_repo
from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter
from athenian.api.controllers.miners.github.branches import load_branch_commit_dates
from athenian.api.controllers.miners.github.commit import DAG, \
fetch_precomputed_commit_history_dags, fetch_repository_commits, RELEASE_FETCH_COMMITS_COLUMNS
from athenian.api.controllers.miners.github.dag_accelerated import extract_subdag, \
mark_dag_access, mark_dag_parents, searchsorted_inrange
from athenian.api.controllers.miners.github.precomputed_prs import \
DonePRFactsLoader, MergedPRFactsLoader, update_unreleased_prs
from athenian.api.controllers.miners.github.release_load import dummy_releases_df, \
group_repos_by_release_match, match_groups_to_sql, ReleaseLoader
from athenian.api.controllers.miners.github.released_pr import index_name, new_released_prs_df, \
release_columns
from athenian.api.controllers.miners.jira.issue import generate_jira_prs_query
from athenian.api.controllers.miners.types import nonemax, PullRequestFactsMap
from athenian.api.controllers.prefixer import Prefixer
from athenian.api.controllers.settings import LogicalPRSettings, LogicalRepositorySettings, \
ReleaseMatch, ReleaseSettings
from athenian.api.db import add_pdb_hits, add_pdb_misses, Database, insert_or_ignore
from athenian.api.defer import defer
from athenian.api.models.metadata.github import NodeCommit, NodeRepository, PullRequest, \
PullRequestLabel, PushCommit, Release
from athenian.api.models.precomputed.models import GitHubRelease as PrecomputedRelease, \
GitHubRepository
from athenian.api.tracing import sentry_span
async def load_commit_dags(releases: pd.DataFrame,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
) -> Dict[str, DAG]:
"""Produce the commit history DAGs which should contain the specified releases."""
pdags = await fetch_precomputed_commit_history_dags(
releases[Release.repository_full_name.name].unique(), account, pdb, cache)
return await fetch_repository_commits(
pdags, releases, RELEASE_FETCH_COMMITS_COLUMNS, False, account, meta_ids, mdb, pdb, cache)
class PullRequestToReleaseMapper:
"""Mapper from pull requests to releases."""
@classmethod
@sentry_span
async def map_prs_to_releases(cls,
prs: pd.DataFrame,
releases: pd.DataFrame,
matched_bys: Dict[str, ReleaseMatch],
branches: pd.DataFrame,
default_branches: Dict[str, str],
time_to: datetime,
dags: Dict[str, DAG],
release_settings: ReleaseSettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
labels: Optional[pd.DataFrame] = None,
) -> Tuple[pd.DataFrame,
PullRequestFactsMap,
asyncio.Event]:
"""
Match the merged pull requests to the nearest releases that include them.
:return: 1. pd.DataFrame with the mapped PRs. \
2. Precomputed facts about unreleased merged PRs. \
3. Synchronization for updating the pdb table with merged unreleased PRs.
"""
assert isinstance(time_to, datetime)
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
assert prs.index.nlevels == 2
pr_releases = new_released_prs_df()
unreleased_prs_event = asyncio.Event()
if prs.empty:
unreleased_prs_event.set()
return pr_releases, {}, unreleased_prs_event
_, unreleased_prs, precomputed_pr_releases = await gather(
load_branch_commit_dates(branches, meta_ids, mdb),
MergedPRFactsLoader.load_merged_unreleased_pull_request_facts(
prs, nonemax(releases[Release.published_at.name].nonemax(), time_to),
LabelFilter.empty(), matched_bys, default_branches, release_settings,
prefixer, account, pdb),
DonePRFactsLoader.load_precomputed_pr_releases(
prs, time_to, matched_bys, default_branches, release_settings,
prefixer, account, pdb, cache),
)
add_pdb_hits(pdb, "map_prs_to_releases/released", len(precomputed_pr_releases))
add_pdb_hits(pdb, "map_prs_to_releases/unreleased", len(unreleased_prs))
pr_releases = precomputed_pr_releases
merged_prs = prs[~prs.index.isin(pr_releases.index.union(unreleased_prs.keys()))]
if merged_prs.empty:
unreleased_prs_event.set()
return pr_releases, unreleased_prs, unreleased_prs_event
labels, missed_released_prs, dead_prs = await gather(
cls._fetch_labels(merged_prs.index.get_level_values(0).values, labels, meta_ids, mdb),
cls._map_prs_to_releases(merged_prs, dags, releases),
cls._find_dead_merged_prs(merged_prs),
)
assert missed_released_prs.index.nlevels == 2
assert dead_prs.index.nlevels == 2
# PRs may wrongly classify as dead although they are really released; remove the conflicts
dead_prs.drop(index=missed_released_prs.index, inplace=True, errors="ignore")
add_pdb_misses(pdb, "map_prs_to_releases/released", len(missed_released_prs))
add_pdb_misses(pdb, "map_prs_to_releases/dead", len(dead_prs))
add_pdb_misses(pdb, "map_prs_to_releases/unreleased",
len(merged_prs) - len(missed_released_prs) - len(dead_prs))
if not dead_prs.empty:
if not missed_released_prs.empty:
missed_released_prs = pd.concat([missed_released_prs, dead_prs])
else:
missed_released_prs = dead_prs
await defer(update_unreleased_prs(
merged_prs, missed_released_prs, time_to, labels, matched_bys, default_branches,
release_settings, account, pdb, unreleased_prs_event),
"update_unreleased_prs(%d, %d)" % (len(merged_prs), len(missed_released_prs)))
return pr_releases.append(missed_released_prs), unreleased_prs, unreleased_prs_event
@classmethod
async def _map_prs_to_releases(cls,
prs: pd.DataFrame,
dags: Dict[str, DAG],
releases: pd.DataFrame,
) -> pd.DataFrame:
if prs.empty:
return new_released_prs_df()
assert prs.index.nlevels == 2
release_repos = releases[Release.repository_full_name.name].values
unique_release_repos, release_index_map, release_repo_counts = np.unique(
release_repos, return_inverse=True, return_counts=True)
# stable sort to preserve the decreasing order by published_at
release_repo_order = np.argsort(release_index_map, kind="stable")
ordered_release_shas = releases[Release.sha.name].values[release_repo_order].astype("S40")
release_repo_offsets = np.zeros(len(release_repo_counts) + 1, dtype=int)
np.cumsum(release_repo_counts, out=release_repo_offsets[1:])
pr_repos = prs.index.get_level_values(1).values
unique_pr_repos, pr_index_map, pr_repo_counts = np.unique(
pr_repos, return_inverse=True, return_counts=True)
pr_repo_order = np.argsort(pr_index_map)
pr_merge_hashes = \
prs[PullRequest.merge_commit_sha.name].values[pr_repo_order].astype("S40")
pr_merged_at = prs[PullRequest.merged_at.name].values[pr_repo_order].astype(
releases[Release.published_at.name].values.dtype, copy=False)
pr_node_ids = prs.index.get_level_values(0).values[pr_repo_order]
pr_repo_offsets = np.zeros(len(pr_repo_counts) + 1, dtype=int)
np.cumsum(pr_repo_counts, out=pr_repo_offsets[1:])
release_pos = pr_pos = 0
released_prs = []
log = logging.getLogger("%s.map_prs_to_releases" % metadata.__package__)
while release_pos < len(unique_release_repos) and pr_pos < len(unique_pr_repos):
release_repo = unique_release_repos[release_pos]
pr_repo = unique_pr_repos[pr_pos]
if release_repo == pr_repo:
hashes, vertexes, edges = dags[drop_logical_repo(pr_repo)]
if len(hashes) == 0:
log.error("very suspicious: empty DAG for %s", pr_repo)
release_beg = release_repo_offsets[release_pos]
release_end = release_repo_offsets[release_pos + 1]
ownership = mark_dag_access(
hashes, vertexes, edges, ordered_release_shas[release_beg:release_end], True)
unmatched = np.flatnonzero(ownership == (release_end - release_beg))
if len(unmatched) > 0:
hashes = np.delete(hashes, unmatched)
ownership = np.delete(ownership, unmatched)
if len(hashes) == 0:
release_pos += 1
continue
pr_beg = pr_repo_offsets[pr_pos]
pr_end = pr_repo_offsets[pr_pos + 1]
merge_hashes = pr_merge_hashes[pr_beg:pr_end]
merges_found = searchsorted_inrange(hashes, merge_hashes)
found_mask = hashes[merges_found] == merge_hashes
found_releases = releases[release_columns].take(
release_repo_order[release_beg:release_end]
[ownership[merges_found[found_mask]]])
if not found_releases.empty:
found_releases[Release.published_at.name] = np.maximum(
found_releases[Release.published_at.name].values,
pr_merged_at[pr_beg:pr_end][found_mask])
found_releases[index_name] = pr_node_ids[pr_beg:pr_end][found_mask]
released_prs.append(found_releases)
release_pos += 1
pr_pos += 1
elif release_repo < pr_repo:
release_pos += 1
else:
pr_pos += 1
if released_prs:
released_prs = pd.concat(released_prs, copy=False)
released_prs.set_index([index_name, Release.repository_full_name.name], inplace=True)
else:
released_prs = new_released_prs_df()
return postprocess_datetime(released_prs)
@classmethod
@sentry_span
async def _find_dead_merged_prs(cls, prs: pd.DataFrame) -> pd.DataFrame:
assert prs.index.nlevels == 2
dead_mask = prs["dead"].values.astype(bool, copy=False)
node_ids = prs.index.get_level_values(0).values
repos = prs.index.get_level_values(1).values
dead_prs = [
(pr_id, None, None, None, None, None, repo, ReleaseMatch.force_push_drop)
for repo, pr_id in zip(repos[dead_mask], node_ids[dead_mask])
]
return new_released_prs_df(dead_prs)
@classmethod
@sentry_span
async def _fetch_labels(cls,
node_ids: Iterable[int],
df: Optional[pd.DataFrame],
meta_ids: Tuple[int, ...],
mdb: Database,
) -> Dict[int, List[str]]:
if df is not None:
labels = {}
for node_id, name in zip(df.index.get_level_values(0).values,
df[PullRequestLabel.name.name].values):
labels.setdefault(node_id, []).append(name)
return labels
rows = await mdb.fetch_all(
select([PullRequestLabel.pull_request_node_id, func.lower(PullRequestLabel.name)])
.where(and_(PullRequestLabel.pull_request_node_id.in_(node_ids),
PullRequestLabel.acc_id.in_(meta_ids))))
labels = {}
for row in rows:
node_id, label = row[0], row[1]
labels.setdefault(node_id, []).append(label)
return labels
class ReleaseToPullRequestMapper:
"""Mapper from releases to pull requests."""
release_loader = ReleaseLoader
@classmethod
@sentry_span
async def map_releases_to_prs(cls,
repos: Collection[str],
branches: pd.DataFrame,
default_branches: Dict[str, str],
time_from: datetime,
time_to: datetime,
authors: Collection[str],
mergers: Collection[str],
jira: JIRAFilter,
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
updated_min: Optional[datetime],
updated_max: Optional[datetime],
pdags: Optional[Dict[str, DAG]],
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
pr_blacklist: Optional[BinaryExpression] = None,
pr_whitelist: Optional[BinaryExpression] = None,
truncate: bool = True,
precomputed_observed: Optional[Tuple[
np.ndarray, np.ndarray]] = None,
) -> Union[Tuple[pd.DataFrame,
pd.DataFrame,
ReleaseSettings,
Dict[str, ReleaseMatch],
Dict[str, DAG],
Tuple[np.ndarray, np.ndarray]],
pd.DataFrame]:
"""Find pull requests which were released between `time_from` and `time_to` but merged before \
`time_from`.
The returned DataFrame-s with releases are already with logical repositories.
:param authors: Required PR commit_authors.
:param mergers: Required PR mergers.
:param truncate: Do not load releases after `time_to`.
:param precomputed_observed: Saved all_observed_commits and all_observed_repos from \
the previous identical invocation. \
See PullRequestMiner._mine().
:return: pd.DataFrame with found PRs that were created before `time_from` and released \
between `time_from` and `time_to` \
(the rest exists if `precomputed_observed` is None) + \
pd.DataFrame with the discovered releases between \
`time_from` and `time_to` (today if not `truncate`) \
+\
holistic release settings that enforce the happened release matches in \
[`time_from`, `time_to`] \
+ \
`matched_bys` so that we don't have to compute that mapping again. \
+ \
commit DAGs that contain the relevant releases. \
+ \
observed commits and repositories (precomputed cache for \
the second call if needed).
"""
assert isinstance(time_from, datetime)
assert isinstance(time_to, datetime)
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
assert isinstance(pr_blacklist, (BinaryExpression, type(None)))
assert isinstance(pr_whitelist, (BinaryExpression, type(None)))
assert (updated_min is None) == (updated_max is None)
if precomputed_observed is None:
(
all_observed_commits, all_observed_repos,
releases_in_time_range, release_settings, matched_bys, dags,
) = await cls._map_releases_to_prs_observe(
repos, branches, default_branches, time_from, time_to,
release_settings, logical_settings, pdags,
prefixer, account, meta_ids, mdb, pdb, rdb, cache, truncate)
else:
all_observed_commits, all_observed_repos = precomputed_observed
if len(all_observed_commits):
prs = await cls._find_old_released_prs(
all_observed_commits, all_observed_repos, time_from, authors, mergers, jira,
updated_min, updated_max, pr_blacklist, pr_whitelist,
prefixer, meta_ids, mdb, cache)
else:
prs = pd.DataFrame(columns=[c.name for c in PullRequest.__table__.columns
if c.name != PullRequest.node_id.name])
prs.index = | pd.Index([], name=PullRequest.node_id.name) | pandas.Index |
from __future__ import print_function, absolute_import, division
import pandas as pd
import numpy as np
import argparse
import json
import math
import re
import os
import sys
import csv
import socket # -- ip checks
import seaborn as sns
import matplotlib.pyplot as plt
from jinja2 import Environment, PackageLoader
# --- functions ---
def get_config(config):
""" convert json config file into a python dict """
with open(config, 'r') as f:
config_dict = json.load(f)[0]
return config_dict
# -- load data --
def get_dataframe(config):
""" load csv into python dataframe """
df = pd.read_csv(config['input_file'], low_memory=False)
return df
# --
def get_overview(config, df):
""" return details of the dataframe and any issues found """
overview_msg = {}
df = df.copy()
column_cnt = len(df.columns)
try:
df['EVENT_TIMESTAMP'] = pd.to_datetime(df[config['required_features']['EVENT_TIMESTAMP']], infer_datetime_format=True)
date_range = df['EVENT_TIMESTAMP'].min().strftime('%Y-%m-%d') + ' to ' + df['EVENT_TIMESTAMP'].max().strftime('%Y-%m-%d')
day_cnt = (df['EVENT_TIMESTAMP'].max() - df['EVENT_TIMESTAMP'].min()).days
except:
overview_msg[config['required_features']['EVENT_TIMESTAMP']] = " Unable to convert" + config['required_features']['EVENT_TIMESTAMP'] + " to timestamp"
date_range = ""
day_cnt = 0
record_cnt = df.shape[0]
memory_size = df.memory_usage(index=True).sum()
record_size = round(float(memory_size) / record_cnt,2)
n_dupe = record_cnt - len(df.drop_duplicates())
if record_cnt <= 10000:
overview_msg["Record count"] = "A minimum of 10,000 rows are required to train the model, your dataset contains " + str(record_cnt)
overview_stats = {
"Record count" : "{:,}".format(record_cnt) ,
"Column count" : "{:,}".format(column_cnt),
"Duplicate count" : "{:,}".format(n_dupe),
"Memory size" : "{:.2f}".format(memory_size/1024**2) + " MB",
"Record size" : "{:,}".format(record_size) + " bytes",
"Date range" : date_range,
"Day count" : "{:,}".format(day_cnt) + " days",
"overview_msg" : overview_msg,
"overview_cnt" : len(overview_msg)
}
return df, overview_stats
def set_feature(row, config):
""" sets the feature type of each variable in the file, identifies features with issues
as well as the required features. this is the first pass of rules
"""
rulehit = 0
feature = ""
message = ""
required_features = config['required_features']
# -- assign numeric --
if ((row._dtype in ['float64', 'int64']) and (row['nunique'] > 1)):
feature = "numeric"
message = "(" + "{:,}".format(row['nunique']) + ") unique"
# -- assign categorical --
if ((row._dtype == 'object') and ( row.nunique_pct <= 0.75)):
feature = "categorical"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
# -- assign categorical to numerics --
if ((row._dtype in ['float64', 'int64']) and ( row['nunique'] <= 1024 )):
feature = "categorical"
message = "(" + "{:,}".format(row['nunique']) + ") unique"
# -- assign binary --
if (row['nunique'] == 2 ):
feature = "categorical"
message = "(" + "{:}".format(row['nunique']) + ") binary"
# -- single value --
if (row['nunique'] == 1):
rulehit = 1
feature = "exclude"
message = "(" + "{:}".format(row['nunique']) + ") single value"
# -- null pct --
if (row.null_pct >= 0.50 and (rulehit == 0)):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.null_pct*100) + "%) missing "
# -- categorical w. high % unique
if ((row._dtype == 'object') and ( row.nunique_pct >= 0.75)) and (rulehit == 0):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
# -- numeric w. extreeme % unique
if ((row._dtype in ['float64', 'int64']) and ( row.nunique_pct >= 0.95)) and (rulehit == 0):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
if ('EMAIL_ADDRESS' in required_features) and (row._column == required_features['EMAIL_ADDRESS']):
feature = "EMAIL_ADDRESS"
if ('IP_ADDRESS' in required_features) and (row._column == required_features['IP_ADDRESS']):
feature = "IP_ADDRESS"
if row._column == required_features['EVENT_TIMESTAMP']:
feature = "EVENT_TIMESTAMP"
if row._column == required_features['EVENT_LABEL']:
feature = "EVENT_LABEL"
return feature, message
def get_label(config, df):
""" returns stats on the label and performs intial label checks """
message = {}
label = config['required_features']['EVENT_LABEL']
label_summary = df[label].value_counts()
rowcnt = df.shape[0]
label_dict = {
"label_field" : label,
"label_values" : df[label].unique(),
"label_dtype" : label_summary.dtype,
"fraud_rate" : "{:.2f}".format((label_summary.min()/label_summary.sum())*100),
"fraud_label": str(label_summary.idxmin()),
"fraud_count": label_summary.min(),
"legit_rate" : "{:.2f}".format((label_summary.max()/label_summary.sum())*100),
"legit_count": label_summary.max(),
"legit_label": str(label_summary.idxmax()),
"null_count" : "{:,}".format(df[label].isnull().sum(axis = 0)),
"null_rate" : "{:.2f}".format(df[label].isnull().sum(axis = 0)/rowcnt),
}
"""
label checks
"""
if label_dict['fraud_count'] <= 500:
message['fraud_count'] = "Fraud count " + str(label_dict['fraud_count']) + " is less than 500\n"
if df[label].isnull().sum(axis = 0)/rowcnt >= 0.01:
message['label_nulls'] = "Your LABEL column contains " + label_dict["null_count"] +" a significant number of null values"
label_dict['warnings'] = len(message)
return label_dict, message
def get_partition(config, df):
""" evaluates your dataset partitions and checks the distribution of fraud lables """
df = df.copy()
row_count = df.shape[0]
required_features = config['required_features']
message = {}
stats ={}
try:
df['_event_timestamp'] = pd.to_datetime(df[required_features['EVENT_TIMESTAMP']])
df['_dt'] = pd.to_datetime(df['_event_timestamp'].dt.date)
except:
message['_event_timestamp'] = "could not parse " + required_features['EVENT_TIMESTAMP'] + " into a date or timestamp object"
df['_event_timestamp'] = df[required_features['EVENT_TIMESTAMP']]
df['_dt'] = df['_event_timestamp']
label_summary = df[required_features['EVENT_LABEL']].value_counts()
legit_label = label_summary.idxmax()
fraud_label = label_summary.idxmin()
df = df.sort_values(by=['_event_timestamp']).reset_index(drop=True)
ctab = pd.crosstab(df['_dt'].astype(str), df[required_features['EVENT_LABEL']]).reset_index()
stats['labels'] = ctab['_dt'].tolist()
stats['legit_rates'] = ctab[legit_label].tolist()
stats['fraud_rates'] = ctab[fraud_label].tolist()
# -- set partitions --
df['partition'] = 'training'
df.loc[math.ceil(row_count*.7):math.ceil(row_count*.85),'partition'] = 'evaluation'
df.loc[math.ceil(row_count*.85):,'partition'] = 'testing'
message = ""
return stats, message
def get_stats(config, df):
""" generates the key column analysis statistics calls set_features function """
df = df.copy()
rowcnt = len(df)
df_s1 = df.agg(['count', 'nunique',]).transpose().reset_index().rename(columns={"index":"_column"})
df_s1['count'] = df_s1['count'].astype('int64')
df_s1['nunique'] = df_s1['nunique'].astype('int64')
df_s1["null"] = (rowcnt - df_s1["count"]).astype('int64')
df_s1["not_null"] = rowcnt - df_s1["null"]
df_s1["null_pct"] = df_s1["null"] / rowcnt
df_s1["nunique_pct"] = df_s1['nunique'] / rowcnt
dt = pd.DataFrame(df.dtypes).reset_index().rename(columns={"index":"_column", 0:"_dtype"})
df_stats = pd.merge(dt, df_s1, on='_column', how='inner')
df_stats = df_stats.round(4)
df_stats[['_feature', '_message']] = df_stats.apply(lambda x: set_feature(x,config), axis = 1, result_type="expand")
return df_stats, df_stats.loc[df_stats["_feature"]=="exclude"]
def get_email(config, df):
""" gets the email statisitcs and performs email checks """
message = {}
required_features = config['required_features']
email = required_features['EMAIL_ADDRESS']
email_recs = df.shape[0]
email_null = df[email].isna().sum()
emails = pd.Series(pd.unique(df[email].values))
email_unique = len(emails)
email_valid = df[email].str.count('\w+\@\w+').sum()
email_invalid = email_recs - ( email_valid + email_null)
df['domain'] = df[email].str.split('@').str[1]
top_10 = df['domain'].value_counts().head(10)
top_dict = top_10.to_dict()
label_summary = df[required_features['EVENT_LABEL']].value_counts()
fraud_label = label_summary.idxmin()
legit_label = label_summary.idxmax()
ctab = pd.crosstab(df['domain'], df[required_features['EVENT_LABEL']],).reset_index()
ctab['tot'] = ctab[fraud_label] + ctab[legit_label]
ctab['fraud_rate'] = ctab[fraud_label]/ctab['tot']
ctab = ctab.sort_values(['tot'],ascending=False)
top_n= ctab.head(10)
domain_count = df['domain'].nunique()
domain_list = top_n['domain'].tolist()
domain_fraud = top_n[fraud_label].tolist()
domain_legit = top_n[legit_label].tolist()
# -- email checks --
if email_unique <= 100:
message['unique_count'] = "Low number of unique emails: " + str(email_unique)
if email_null/len(df) >= 0.20:
message['null_email'] = "High percentage of null emails: " + '{0: >#016.2f}'.format(email_null/len(df)) + "%"
if email_invalid/len(df) >= 0.5:
message['invalid_email'] = "High number of invalid emails: " + '{0: >#016.2f}'.format(email_invalid/len(df)) + "%"
domain_list = list(top_dict.keys())
#domain_value = list(top_dict.values())
email_dict = {
"email_addr" : email,
"email_recs" : "{:,}".format(email_recs),
"email_null" : "{:,}".format(email_null),
"email_pctnull" : "{:.2f}".format((email_null/email_recs)*100),
"email_unique" : "{:,}".format(email_unique),
"email_pctunq" : "{:.2f}".format((email_unique/email_recs)*100),
"email_valid" : "{:,}".format(email_valid),
"email_invalid" : "{:,}".format(email_invalid),
"email_warnings": len(message),
"domain_count" : "{:,}".format(domain_count),
"domain_list" : domain_list,
"domain_fraud" : domain_fraud,
"domain_legit" : domain_legit
}
return email_dict, message
def valid_ip(ip):
""" checks to insure we have a valid ip address """
try:
parts = ip.split('.')
return len(parts) == 4 and all(0 <= int(part) < 256 for part in parts)
except ValueError:
return False # one of the 'parts' not convertible to integer
except (AttributeError, TypeError):
return False # `ip` isn't even a string
def get_ip_address(config, df):
""" gets ip address statisitcs and performs ip address checks """
message = {}
required_features = config['required_features']
ip = required_features['IP_ADDRESS']
ip_recs = df.shape[0] - df[ip].isna().sum()
ip_null = df[ip].isna().sum()
ips = pd.Series(pd.unique(df[ip].values))
ip_unique = len(ips)
df['_ip'] = df[ip].apply(valid_ip)
ip_valid = df['_ip'].sum()
ip_invalid = ip_recs - ip_valid
print(ip_null)
label_summary = df[required_features['EVENT_LABEL']].value_counts()
fraud_label = label_summary.idxmin()
legit_label = label_summary.idxmax()
ctab = pd.crosstab(df[required_features['IP_ADDRESS']], df[required_features['EVENT_LABEL']],).reset_index()
ctab['tot'] = ctab[fraud_label] + ctab[legit_label]
ctab['fraud_rate'] = ctab[fraud_label]/ctab['tot']
ctab = ctab.sort_values(['tot'],ascending=False)
top_n= ctab.head(10)
ip_list = top_n[ip].tolist()
ip_fraud = top_n[fraud_label].tolist()
ip_legit = top_n[legit_label].tolist()
# -- ip checks --
if ip_unique <= 100:
message['unique_count'] = "Low number of unique ip addresses: " + str(ip_unique)
if ip_null/len(df) >= 0.20:
message['null_ip'] = "High percentage of null ip addresses: " + '{0: >#016.2f}'.format(ip_null/len(df)) + "%"
if ip_invalid/len(df) >= 0.5:
message['invalid_ip'] = "High number of invalid ip addresses: " + '{0: >#016.2f}'.format(ip_invalid/len(df)) + "%"
ip_dict = {
"ip_addr" : ip,
"ip_recs" : "{:,}".format(ip_recs),
"ip_null" : "{:,}".format(ip_null),
"ip_pctnull" : "{:.2f}".format((ip_null/ip_recs)*100),
"ip_unique" : "{:,}".format(ip_unique),
"ip_pctunq" : "{:.2f}".format((ip_unique/ip_recs)*100),
"ip_valid" : "{:,}".format(ip_valid),
"ip_invalid" : "{:,}".format(ip_invalid),
"ip_warnings": len(message),
"ip_list" : ip_list,
"ip_fraud" : ip_fraud,
"ip_legit" : ip_legit
}
return ip_dict, message
def col_stats(df, target, column):
""" generates column statisitcs for categorical columns """
legit = df[target].value_counts().idxmax()
fraud = df[target].value_counts().idxmin()
try:
cat_summary = | pd.crosstab(df[column],df[target]) | pandas.crosstab |
"""
Iterating through all sequences in a data directory, computing data stats for
each sequence (#instances, #activities, ...), cleaning stats and saving them
"""
import os
import copy
from tqdm import tqdm
import json
import argparse
import numpy as np
import pandas as pd
def get_args():
""" Reading command line arguments """
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--data_path",
required=True,
help="Directory with the sequences to extract the stats from. Relative to root directory",
)
cfg = parser.parse_args()
assert os.path.exists(os.path.join(os.getcwd(), cfg.data_path)), \
f"Data path {cfg.data_path} does not exist..."
return cfg
class DatasetStats:
"""
Object for computing and accumulating the dataset stats
"""
def __init__(self, data_path):
""" """
self.data_path = data_path
self.gt_stats = {
"num_frames": [],
"num_instances": [],
"freq_objects": []
}
self.info_stats = {
"num_pixels": [],
"bbox_size": [],
"pixel_visibility": [],
"bbox_visibility": []
}
self.all_stats = {}
return
# NOTE: Will the objects always be the same for all frames, or will that eventually change?
def compute_gt_stats(self, file):
"""
Computing some stats from the scene_gt.json file
"""
assert file.split("_")[-1] == "gt.json", f"Wrong gt file {os.path.basename(file)}..."
with open(file, "r") as f:
gt_data = json.load(f)
# fetching object ids and num objects for sequence
data = gt_data[list(gt_data.keys())[0]]
obj_ids = [obj["obj_id"] for obj in data]
unique_ids, counts = np.unique(obj_ids, return_counts=True)
num_instances = len(data)
num_frames = len(gt_data)
# accumulating
gt_stats = copy.deepcopy(self.gt_stats)
gt_stats["num_frames"] = num_frames
gt_stats["num_instances"] = num_instances
gt_stats["freq_objects"].append({int(id): int(count) for id, count in zip(unique_ids, counts)})
return gt_stats
def compute_info_stats(self, file):
"""
Computing some stats from the scene_gt_info.json file
"""
assert file.split("_")[-1] == "info.json", f"Wrong gt_info file {os.path.basename(file)}..."
with open(file, "r") as f:
info_data = json.load(f)
# NOTE: What frequency do we want, framewise or sequence wise?. Let"s go sequencewise for now
# fetching framewise pixel and bbox information
cur_pixels, cur_bbox, cur_pixel_vis, cur_bbox_vis = [], [], [], []
for _, data in info_data.items():
for obj in data:
cur_pixels.append(obj["px_count_all"])
cur_bbox.append(obj["bbox_obj"][2] * obj["bbox_obj"][3])
cur_pixel_vis.append(obj["visib_fract"])
cur_bbox_vis.append(
self._get_bbox_vis(bbox=obj["bbox_obj"], bbox_vis=obj["bbox_visib"])
)
info_stats = copy.deepcopy(self.info_stats)
info_stats["num_pixels"].append( np.mean(cur_pixels) )
info_stats["bbox_size"].append( np.mean(cur_bbox) )
info_stats["pixel_visibility"].append( np.mean(cur_pixel_vis) )
info_stats["bbox_visibility"].append( np.mean(cur_bbox_vis) )
return info_stats
def accumulate_stats(self, seq_path):
"""
Computing stats for sequence
"""
# files
seq_name = os.path.basename(seq_path)
scene = seq_name.split("_")[1]
gt_file = os.path.join(seq_path, "scene_gt.json")
info_file = os.path.join(seq_path, "scene_gt_info.json")
# computing statistics from each of the files
gt_stats = self.compute_gt_stats(gt_file)
info_stats = self.compute_info_stats(info_file)
# aggregating
seq_num = len(self.all_stats)
cur_stats = {
"scene": scene,
"seq_name": seq_name,
"gt_stats": gt_stats,
"info_stats": info_stats
}
self.all_stats[str(seq_num)] = cur_stats
return
def compute_avg_stats(self, scene=None):
""" Computing overall average stats considering all sequences """
stats = self.all_stats if scene is None else {k: v for k, v in self.all_stats.items() if v["scene"] == scene}
df = pd.DataFrame.from_dict(stats, orient="index")
avg_stats = {}
# frequency of each activity
scene_counts = df["scene"].value_counts().to_dict()
norm_scene_counts = (df["scene"].value_counts() / df["scene"].count()).to_dict()
avg_stats["scene_counts"] = scene_counts
avg_stats["norm_scene_counts"] = norm_scene_counts
# mean/min/max number of frames and instances
frames = [stats[n]["gt_stats"]["num_frames"] for n in stats.keys()]
instances = [stats[n]["gt_stats"]["num_instances"] for n in stats.keys()]
avg_stats["frames"] = self._add_stats(frames)
avg_stats["instances"] = self._add_stats(instances)
# frequency of each object
freqs = [freqs for n in stats.keys() for freqs in stats[n]["gt_stats"]["freq_objects"]]
df = | pd.DataFrame(freqs) | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import sklearn.datasets, sklearn.decomposition
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import sklearn_extra
from scipy import stats
from scipy.stats import kurtosis, skew
from collections import defaultdict
import statistics
from itertools import chain
from scipy.interpolate import interp1d
from collections import defaultdict
from nested_dict import nested_dict
import clustring_kmean_forced
def kmedoid_clusters(path_test):
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
city = editable_data['city']
save_path = os.path.join(path_test, str('Scenario Generation') , city)
representative_days_path = os.path.join(save_path,'Operation Representative days')
if not os.path.exists(representative_days_path):
os.makedirs(representative_days_path)
folder_path = os.path.join(path_test,str(city))
range_data = ['low','medium','high']
scenario_genrated = {}
scenario_probability = defaultdict(list)
scenario_number = {}
num_scenario = 0
i_solar= range_data[1]
i_wind= range_data[1]
i_emission= range_data[1]
    #load the energy demand, solar, wind, and electricity emissions from the scenario generation files
for i_demand in range_data:
if i_demand=='low':
p_demand = 0.277778
elif i_demand=='medium':
p_demand = 0.444444
elif i_demand=='high':
p_demand = 0.277778
for day in range(365):
scenario_probability['D:'+i_demand].append(p_demand)
scenario_number['D:'+i_demand]= num_scenario
num_scenario = num_scenario + 1
scenario_genrated['D:'+i_demand] = pd.read_csv(os.path.join(save_path, 'D_'+i_demand+'_S_'+i_solar+'_W_'+i_wind+'_C_'+i_emission+'.csv'), header=None)
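    # Build one 48-dimensional feature vector per scenario-day: 24 hourly electricity values followed by 24 hourly heating values.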
features_scenarios = defaultdict(list)
features_scenarios_list = []
features_probability_list = []
features_scenarios_nested = nested_dict()
k=0
days= 365
for scenario in scenario_genrated.keys():
for i in range(days):
if i==0:
data = scenario_genrated[scenario][1:25]
else:
data = scenario_genrated[scenario][25+(i-1)*24:25+(i)*24]
            #Daily profile: 24 hourly total electricity values (column 0) followed by 24 hourly heating values (column 1).
daily_list =list(chain(data[0].astype('float', copy=False),data[1].astype('float', copy=False)))
features_scenarios[k*days+i] = daily_list
features_scenarios_nested[scenario][i] = features_scenarios[k*days+i]
features_scenarios_list.append(features_scenarios[k*days+i])
features_probability_list.append(scenario_probability[scenario][i])
k = k+1
A = np.asarray(features_scenarios_list)
#Convert the dictionary of features to Series
standardization_data = StandardScaler()
A_scaled = standardization_data.fit_transform(A)
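    # Standardize every feature to zero mean and unit variance so electricity and heating magnitudes contribute comparably to the clustering distance.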
inertia_list = []
    search_optimum_cluster = editable_data['Search optimum clusters'] # 'yes' to search for the optimum number of clusters with the elbow method, anything else to skip
cluster_range = range(2,20,1)
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
if search_optimum_cluster=='yes':
print('Defining the optimum number of clusters: ')
fig, ax = plt.subplots(figsize=(12, 6))
for cluster_numbers in cluster_range:
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=0).fit(A_scaled)
inertia_list.append(kmedoids.inertia_)
plt.scatter(cluster_numbers,kmedoids.inertia_)
print('Cluster number:', cluster_numbers, ' Inertia of the cluster:', int(kmedoids.inertia_))
ax.set_xlabel('Number of clusters',fontsize=BIGGER_SIZE)
ax.set_ylabel('Inertia',fontsize=BIGGER_SIZE)
ax.set_title('The user should use "Elbow method" to select the number of optimum clusters',fontsize=BIGGER_SIZE)
ax.plot(list(cluster_range),inertia_list)
ax.set_xticks(np.arange(2,20,1))
plt.savefig(os.path.join(sys.path[0], 'Inertia vs Clusters.png'),dpi=300,facecolor='w')
plt.close()
print('"Inertia vs Clusters" figure is saved in the directory folder')
print('You can use the figure to select the optimum number of clusters' )
        print('You should enter the new optimum number of clusters in the editable_values.csv file and re-run this part')
cluster_numbers= int(editable_data['Cluster numbers'])
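    # Fit K-Medoids on the standardized daily profiles; unlike K-Means, every cluster center (medoid) is an actual observed day.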
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(A_scaled)
#kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(scores_pca)
label = kmedoids.fit_predict(A_scaled)
#filter rows of original data
probability_label = defaultdict(list)
index_label = defaultdict(list)
index_label_all = []
filtered_label={}
for i in range(cluster_numbers):
filtered_label[i] = A_scaled[label == i]
index_cluster=np.where(label==i)
if len(filtered_label[i])!=0:
index_cluster = index_cluster[0]
for j in index_cluster:
probability_label[i].append(features_probability_list[j])
index_label[i].append(j)
index_label_all.append(j)
else:
probability_label[i].append(0)
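    # sum_probability[c] aggregates the day weights assigned to cluster c; it later gives each representative day's share of the year (the 'Percent %' column).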
sum_probability = []
for key in probability_label.keys():
sum_probability.append(sum(probability_label[key]))
#print(kmedoids.predict([[0,0,0], [4,4,4]]))
#print(kmedoids.cluster_centers_,kmedoids.cluster_centers_[0],len(kmedoids.cluster_centers_))
A_scaled_list={}
clusters={}
clusters_list = []
label_list = []
data_labels={}
data_all_labels = defaultdict(list)
for center in range(len(kmedoids.cluster_centers_)):
clusters['cluster centers '+str(center)]= kmedoids.cluster_centers_[center]
clusters_list.append(kmedoids.cluster_centers_[center].tolist())
for scenario in range(len(A_scaled)):
A_scaled_list[scenario]=A_scaled[scenario].tolist()
data_all_labels[kmedoids.labels_[scenario]].append(standardization_data.inverse_transform(A_scaled_list[scenario]))
A_scaled_list[scenario].insert(0,kmedoids.labels_[scenario])
data_labels['labels '+str(scenario)]= A_scaled_list[scenario]
label_list.append(A_scaled[scenario].tolist())
df_clusters= pd.DataFrame(clusters)
df_labels = pd.DataFrame(data_labels)
df_clusters.to_csv(os.path.join(representative_days_path , 'cluster_centers_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
df_labels.to_csv(os.path.join(representative_days_path , 'labels_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
    #Inverse-transform the cluster centers (medoids) from the standardized space back to the original demand units
Scenario_generated_new = standardization_data.inverse_transform(kmedoids.cluster_centers_)
#print('15 representative days',clusters_reverse[0][0],Scenario_generated_new[0][0],standardization_data.mean_[0],standardization_data.var_[0])
representative_day_all = {}
total_labels = []
represent_gaps = {}
scenario_data = {}
for key in filtered_label.keys():
total_labels.append(len(filtered_label[key]))
#print(len(probability_label[0])) 1990
#print(len(filtered_label[0])) 1990
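    # Post-process each representative day: clamp negative values to zero, then fill short runs of zeros inside each 24-h profile (averaging for single-hour gaps, linear interpolation for gaps up to 6 h).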
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48):
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
min_non_z = np.min(np.nonzero(scenario_data[k]))
max_non_z = np.max(np.nonzero(scenario_data[k]))
represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
for n in iranges:
next_n = next(iranges)
if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
                elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of up to 6 hours, fill by linear interpolation between the bounding non-zero hours
f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
for m in range(n,next_n+1):
scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
print('cluster evaluation starts')
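    # Design-day construction: scan the high-demand scenario hour by hour and collect the hours whose demand exceeds every representative day; these uncovered hours define the electricity and heating design days.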
max_heating_scenarios_nested = nested_dict()
max_electricity_scenarios_nested = nested_dict()
total_heating_scenarios = []
total_electricity_scenarios = []
max_electricity_scenarios_nested_list = defaultdict(list)
max_heating_scenarios_nested_list = defaultdict(list)
accuracy_design_day = 0.99
design_day_heating = []
design_day_electricity = []
representative_day_max = {}
electricity_design_day = {}
heating_design_day = {}
i_demand=range_data[2]
i_solar=range_data[1]
i_wind=range_data[1]
i_emission=range_data[1]
scenario='D:'+i_demand
for day in range(365):
for i in range(24):
k_elect=0
list_k_electricity = []
k_heat=0
list_k_heating = []
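            # Compare this hour against every representative day (note: the representative-day CSVs are re-read on every iteration of this triple loop).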
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
if features_scenarios_nested[scenario][day][0:24][i]>electricity_demand[i]:
k_elect=1
list_k_electricity.append(k_elect)
k_elect=0
if features_scenarios_nested[scenario][day][24:48][i]>heating_demand[i]:
k_heat=1
list_k_heating.append(k_heat)
k_heat=0
        if sum(list_k_electricity)==cluster_numbers: # this hour is not met by any of the representative days
max_electricity_scenarios_nested_list[i].append(features_scenarios_nested[scenario][day][0:24][i])
total_electricity_scenarios.append(features_scenarios_nested[scenario][day][0:24][i])
        if sum(list_k_heating)==cluster_numbers: # this hour is not met by any of the representative days
max_heating_scenarios_nested_list[i].append(features_scenarios_nested[scenario][day][24:48][i])
total_heating_scenarios.append(features_scenarios_nested[scenario][day][24:48][i])
total_electricity_scenarios.sort(reverse=True)
total_heating_scenarios.sort(reverse=True)
max_electricity_hour = total_electricity_scenarios[35]
max_heating_hour = total_heating_scenarios[2]
print(max_heating_hour,len(total_heating_scenarios),np.min(total_heating_scenarios),np.max(total_heating_scenarios))
i_demand=range_data[2]
i_solar=range_data[1]
i_wind=range_data[1]
i_emission=range_data[1]
scenario='D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission
design_day_heating = []
design_day_electricity = []
for i in range(24):
design_day_electricity.append(np.max([j for j in max_electricity_scenarios_nested_list[i] if j<max_electricity_hour]))
print(i,len(max_heating_scenarios_nested_list[i]),max_heating_scenarios_nested_list[i])
heating_dd = [j for j in max_heating_scenarios_nested_list[i] if j<max_heating_hour]
print(heating_dd)
design_day_heating.append(np.max(heating_dd))
representative_day_max = {}
electricity_demand_total = defaultdict(list)
heating_demand_total = defaultdict(list)
heating_demand_max = {}
electricity_demand_max = {}
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
#hours_representative_day= round(sum_probability[representative_day]/sum(sum_probability),4)*8760
heating_demand_max[represent]= np.mean(heating_demand)
electricity_demand_max[represent]= np.mean(electricity_demand)
high_electricity_index = []
high_heating_index = []
high_electricity_value = []
high_heating_value = []
key_max_electricity=max(electricity_demand_max, key=electricity_demand_max.get)
key_max_heating=max(heating_demand_max, key=heating_demand_max.get)
for key, value in max_electricity_scenarios_nested.items():
for inner_key, inner_value in max_electricity_scenarios_nested[key].items():
if inner_value>electricity_demand_max[key_max_electricity]:
high_electricity_index.append(scenario_number[key]*365+inner_key)
high_electricity_value.append(inner_value)
for key, value in max_heating_scenarios_nested.items():
for inner_key, inner_value in max_heating_scenarios_nested[key].items():
if inner_value>heating_demand_max[key_max_heating]:
high_heating_index.append(scenario_number[key]*365+inner_key)
high_heating_value.append(inner_value)
sum_probability.append(0.5*len(total_electricity_scenarios)/len(index_label_all)*365)
sum_probability.append(len(total_heating_scenarios)/len(index_label_all)*365)
filtered_label[cluster_numbers]=len(total_electricity_scenarios)
filtered_label[cluster_numbers+1]=len(total_heating_scenarios)
representative_day = cluster_numbers
data_represent_days_modified={'Electricity total (kWh)': design_day_electricity,
'Heating (kWh)': representative_day_max[key_max_electricity]['Heating (kWh)'],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
representative_day = cluster_numbers+1
data_represent_days_modified={'Electricity total (kWh)': representative_day_max[key_max_heating]['Electricity total (kWh)'],
'Heating (kWh)': design_day_heating,
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
    for i in range(48): # 24*2 = 48 features in each day
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
    for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
min_non_z = np.min(np.nonzero(scenario_data[k]))
max_non_z = np.max(np.nonzero(scenario_data[k]))
represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
for n in iranges:
next_n = next(iranges)
if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
            elif (next_n-n) > 0 and (next_n-n) <= 6: # for gaps longer than one hour (next_n-n up to 6), fill by linear interpolation between the bounding values
f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
for m in range(n,next_n+1):
scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
    df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import pandas
import numpy as np
import pyarrow
import pytest
import re
from modin.config import IsExperimental, Engine, StorageFormat
from modin.pandas.test.utils import io_ops_bad_exc
from .utils import eval_io, ForceOmnisciImport, set_execution_mode, run_and_compare
from pandas.core.dtypes.common import is_list_like
IsExperimental.put(True)
Engine.put("native")
StorageFormat.put("omnisci")
import modin.pandas as pd
from modin.pandas.test.utils import (
df_equals,
bool_arg_values,
to_pandas,
test_data_values,
test_data_keys,
generate_multiindex,
eval_general,
df_equals_with_non_stable_indices,
)
from modin.utils import try_cast_to_pandas
from modin.experimental.core.execution.native.implementations.omnisci_on_native.partitioning.partition_manager import (
OmnisciOnNativeDataframePartitionManager,
)
from modin.experimental.core.execution.native.implementations.omnisci_on_native.df_algebra import (
FrameNode,
)
@pytest.mark.usefixtures("TestReadCSVFixture")
class TestCSV:
from modin import __file__ as modin_root
root = os.path.dirname(
os.path.dirname(os.path.abspath(modin_root)) + ".."
) # root of modin repo
boston_housing_names = [
"index",
"CRIM",
"ZN",
"INDUS",
"CHAS",
"NOX",
"RM",
"AGE",
"DIS",
"RAD",
"TAX",
"PTRATIO",
"B",
"LSTAT",
"PRICE",
]
boston_housing_dtypes = {
"index": "int64",
"CRIM": "float64",
"ZN": "float64",
"INDUS": "float64",
"CHAS": "float64",
"NOX": "float64",
"RM": "float64",
"AGE": "float64",
"DIS": "float64",
"RAD": "float64",
"TAX": "float64",
"PTRATIO": "float64",
"B": "float64",
"LSTAT": "float64",
"PRICE": "float64",
}
def test_usecols_csv(self):
"""check with the following arguments: names, dtype, skiprows, delimiter"""
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
for kwargs in (
{"delimiter": ","},
{"sep": None},
{"skiprows": 1, "names": ["A", "B", "C", "D", "E"]},
{"dtype": {"a": "int32", "e": "string"}},
{"dtype": {"a": np.dtype("int32"), "b": np.dtype("int64"), "e": "string"}},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
def test_housing_csv(self):
csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
for kwargs in (
{
"skiprows": 1,
"names": self.boston_housing_names,
"dtype": self.boston_housing_dtypes,
},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
def test_time_parsing(self):
csv_file = os.path.join(
self.root, "modin/pandas/test/data", "test_time_parsing.csv"
)
for kwargs in (
{
"skiprows": 1,
"names": [
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
"parse_dates": ["timestamp"],
"dtype": {"symbol": "string"},
},
):
rp = pandas.read_csv(csv_file, **kwargs)
rm = pd.read_csv(csv_file, engine="arrow", **kwargs)
with ForceOmnisciImport(rm):
rm = to_pandas(rm)
df_equals(rm["timestamp"].dt.year, rp["timestamp"].dt.year)
df_equals(rm["timestamp"].dt.month, rp["timestamp"].dt.month)
df_equals(rm["timestamp"].dt.day, rp["timestamp"].dt.day)
def test_csv_fillna(self):
csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
for kwargs in (
{
"skiprows": 1,
"names": self.boston_housing_names,
"dtype": self.boston_housing_dtypes,
},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
comparator=lambda df1, df2: df_equals(
df1["CRIM"].fillna(1000), df2["CRIM"].fillna(1000)
),
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
@pytest.mark.parametrize("null_dtype", ["category", "float64"])
def test_null_col(self, null_dtype):
csv_file = os.path.join(
self.root, "modin/pandas/test/data", "test_null_col.csv"
)
ref = pandas.read_csv(
csv_file,
names=["a", "b", "c"],
dtype={"a": "int64", "b": "int64", "c": null_dtype},
skiprows=1,
)
ref["a"] = ref["a"] + ref["b"]
exp = pd.read_csv(
csv_file,
names=["a", "b", "c"],
dtype={"a": "int64", "b": "int64", "c": null_dtype},
skiprows=1,
)
exp["a"] = exp["a"] + exp["b"]
# df_equals cannot compare empty categories
if null_dtype == "category":
ref["c"] = ref["c"].astype("string")
with ForceOmnisciImport(exp):
exp = to_pandas(exp)
exp["c"] = exp["c"].astype("string")
df_equals(ref, exp)
def test_read_and_concat(self):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
ref1 = pandas.read_csv(csv_file)
ref2 = pandas.read_csv(csv_file)
        ref = pandas.concat([ref1, ref2])
# Title: Data cleaning for improved NEWS2 paper
# Author: <NAME>
# Started: 2020-07-14
import os
from joblib import dump
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
verbose = False
raw = {}
for r in ['bloods', 'outcomes', 'vitals']:
raw[r] = pd.read_csv(os.path.join('data', 'raw', '2020-06-30 Updated 1500',
'1593424203.7842345_AST', r + '.csv'),
low_memory=False)
# Outcomes --------------------------------------------------------------------
oc = raw['outcomes'].rename(columns={'patient_pseudo_id': 'pid',
'Sx Date': 'symp_onset',
'Primary Endpoint': 'primary_end',
'Death Date': 'death_date',
'ITU date': 'itu_date',
'Admit Date': 'admit_date'})
oc.columns = [x.lower() for x in oc.columns]
# Derive BAME
oc['bame'] = np.select([oc['ethnicity'].isin(['Black', 'Asian']),
oc['ethnicity'] == 'Caucasian',
True],
[True, False, pd.NA])
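# Note on the np.select call above: the first matching condition wins for each row,
# so 'Black'/'Asian' map to True, 'Caucasian' maps to False, and any other or missing
# ethnicity falls through to the catch-all condition and is coded as pd.NA.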
# Derive outcomes for paper ---------------------------------------------------
for i in ['symp_onset', 'death_date', 'itu_date', 'admit_date']:
oc[i] = pd.to_datetime(oc[i])
# Set index date
# If nosocomial, use symptom onset; otherwise use admission date.
oc['nosoc'] = oc['symp_onset'] > oc['admit_date']
oc['index'] = np.where(oc['nosoc'], oc['symp_onset'], oc['admit_date'])
# Define endpoints
oc['end14'] = oc['index'] + pd.DateOffset(days=14)
oc['end3'] = oc['index'] + pd.DateOffset(hours=72)
# Check patients who died/ICU before symptom onset
oc['y_before_onset'] = ((oc['death_date'] < oc['symp_onset']) |
(oc['itu_date'] < oc['symp_onset']))
# Check patients who died/ICU before admission
oc['y_before_admit'] = ((oc['death_date'] < oc['admit_date']) |
(oc['itu_date'] < oc['admit_date']))
# Remove patients who died before admission
oc = oc[~oc['y_before_admit']]
# Define 14-day outcome
latest_extract = pd.to_datetime('2020-05-18')
oc['event14'] = np.select([oc['death_date'] <= oc['end14'],
oc['itu_date'] <= oc['end14'],
oc['end14'] <= latest_extract,
True],
['death', 'itu', 'other', pd.NA])
oc['y14'] = pd.NA
oc.loc[(oc['event14'] == 'death') | (oc['event14'] == 'itu'), 'y14'] = 1
oc.loc[oc['event14'] == 'other', 'y14'] = 0
# Define 3-day outcome
oc['event3'] = np.select([oc['death_date'] <= oc['end3'],
oc['itu_date'] <= oc['end3'],
oc['end3'] <= latest_extract,
True],
['death', 'itu', 'other', pd.NA])
oc['y3'] = pd.NA
oc.loc[(oc['event3'] == 'death') | (oc['event3'] == 'itu'), 'y3'] = 1
oc.loc[oc['event3'] == 'other', 'y3'] = 0
# Define survival outcomes ----------------------------------------------------
# Days until death
oc['td_days'] = (oc['death_date'] - oc['index']).dt.days
oc['td_cens'] = (oc['td_days'].isna()) | (oc['td_days'] > 14)
oc['td_days'] = np.select([oc['td_days'].isna(),
oc['td_days'] > 14,
True],
[14, 14, oc['td_days']])
# Days until ICU
oc['ti_days'] = (oc['itu_date'] - oc['index']).dt.days
oc['ti_cens'] = (oc['ti_days'].isna()) | (oc['ti_days'] > 14)
oc['ti_days'] = np.select([oc['ti_days'].isna(),
oc['ti_days'] > 14,
True],
[14, 14, oc['ti_days']])
# Days until death OR ICU
oc['either_date'] = oc[['itu_date', 'death_date']].min(axis=1)
oc['te_days'] = (oc['either_date'] - oc['index']).dt.days
oc['te_cens'] = (oc['te_days'].isna()) | (oc['te_days'] > 14)
oc['te_days'] = np.select([oc['te_days'].isna(),
oc['te_days'] > 14,
True],
[14, 14, oc['te_days']])
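# Sketch of the censoring logic with made-up dates: a patient indexed on 2020-04-01
# whose first event (death or ICU) is 2020-04-06 gets te_days = 5 and te_cens = False;
# a patient with no event date, or an event after day 14, is censored at te_days = 14
# with te_cens = True.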
# Check that all patients have passed their 14-day endpoint
print(all((oc['end14'] < latest_extract)))
# Define 'number of comorbidities'
numcom = oc[['copd', 'asthma', 'hf',
'diabetes', 'ihd', 'ckd', 'htn']].sum(axis=1)
numcom[numcom > 4] = 4
oc['numcom'] = numcom
# Vitals ----------------------------------------------------------------------
vt = raw['vitals']
vt['ut'] = pd.to_datetime(vt['RECORDED DATE'])
# Derive GCS score
gcs = vt.loc[:, vt.columns.str.startswith('GCS ')].copy()
for v in gcs:
    gcs[v] = gcs[v].str.extract(r'(\d+)').astype(float)
vt['gcs_score'] = gcs.sum(skipna=False, axis=1)
# Create oxygen measures
vt['oxlt'] = vt['Oxygen Litres']
vt['suppox'] = np.select([vt['Supplemental Oxygen'] == 'No (Air)',
vt['Supplemental Oxygen'] == 'Yes',
True],
[False, True, pd.NA])
vt.loc[vt['Supplemental Oxygen'] == 'No (Air)', 'oxlt'] = 0
vt['oxord'] = np.select([vt['oxlt'] == 0,
vt['oxlt'] <= 0.5,
vt['oxlt'] <= 1,
vt['oxlt'] <= 2,
vt['oxlt'] <= 3,
vt['oxlt'] <= 5,
vt['oxlt'] <= 10,
True],
[0, 1, 2, 3, 4, 5, 6, 7])
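# The resulting ordinal oxygen scale reads roughly as: 0 = room air, 1 = <=0.5 L/min,
# 2 = <=1, 3 = <=2, 4 = <=3, 5 = <=5, 6 = <=10 and 7 = more than 10 L/min of
# supplemental oxygen; rows with missing oxlt also end up in category 7, because every
# comparison with NaN is False and np.select then takes the catch-all branch.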
# Select required measures
vt = vt.rename(columns={'patient_pseudo_id': 'pid',
'Temperature': 'temp',
'Oxygen Saturation': 'oxsat',
'Respiration Rate': 'resp',
'Heart Rate': 'hr',
'Systolic BP': 'sbp',
'Diastolic BP': 'dbp',
'NEWS2 score': 'news2'})
keep = ['pid', 'temp', 'oxsat', 'resp', 'hr', 'sbp', 'dbp', 'news2', 'oxlt',
'suppox', 'oxord', 'gcs_score']
vt = vt[['ut'] + keep]
# Pick first non-missing value following hospital admission and symptom onset
vt = vt.merge(oc, how='inner', on='pid')
vt['latest_measure'] = vt['index'] + pd.DateOffset(hours=48)
vt = vt[(vt['ut'] >= vt['admit_date']) & # After hospital admission
(vt['ut'] >= vt['symp_onset']) & # After sympton onset
(vt['ut'] <= vt['latest_measure'])]
vt = vt.sort_values(['pid', 'ut'],
ascending=True).groupby('pid').first().reset_index()
vt = vt[keep]
# Select items with <30% missing data
pct_miss = vt.isna().sum() / len(vt)
vt = vt[pct_miss.index[pct_miss < 0.3]]
# Bloods ----------------------------------------------------------------------
blood = raw['bloods']
blood = blood.rename(columns={'Unnamed: 0.1': 'null',
'updatetime': 'ut',
'basicobs_itemname_analysed': 'item_raw',
'textualObs': 'notes',
'basicobs_value_analysed': 'value',
'basicobs_unitofmeasure': 'units',
'basicobs_referencelowerlimit': 'lowerlim',
'basicobs_referenceupperlimit': 'upperlim',
'updatetime_raw': 'ut_raw',
'patient_pseudo_id': 'pid'})
blood = blood[['pid', 'ut', 'item_raw', 'value', 'units']]
blood = blood[blood['units'].notna()]
# Clean values
blood.loc[blood['value'].str.contains(r'\.\.\.', na=False), 'value'] = pd.NA
blood['value'] = blood['value'].str.replace('>|<', '', regex=True)
blood['value'] = pd.to_numeric(blood['value'], errors='coerce')
# Clean names
def clean_names(item):
for ch in [' ', '-', '/', '%', ':', '\'', '.']:
if ch in item:
item = item.replace(ch, '')
return(item.lower())
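# For example, clean_names("C-Reactive Protein") returns "creactiveprotein" and
# clean_names("WBC Count") returns "wbccount"; the mapping below then collapses such
# strings to short labels like 'crp' and 'wbc'. (The example inputs are illustrative;
# real item names come from the raw bloods extract.)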
item = blood['item_raw'].apply(clean_names)
item[item.str.contains('hba1c')] = 'hba1c'
item[item == 'creactiveprotein'] = 'crp'
item[item == 'aspartatetransaminase'] = 'art'
item[item == 'wbccount'] = 'wbc'
item[item == 'po2(t)'] = 'po2'
item[item == 'pco2(t)'] = 'pco2'
item[item.str.contains('lymphocytes')] = 'lymphocytes'
blood['item'] = item
# Parse time
blood['ut'] = pd.to_datetime(blood['ut'])
# Select required columns
keepers = ['pid', 'ut', 'item', 'value']
blood = blood[keepers]
# Remove measurements taken before index date, or after 14-day endpoint
blood = blood.merge(oc, how='left', on='pid')
blood['latest_measure'] = blood['index'] + pd.DateOffset(hours=48)
################################################################################
# Module: trnsys.py
# Description: Convert EnergyPlus models to TrnBuild models
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import io
import logging as lg
import os
import re
import shutil
import subprocess
import sys
import time
import numpy as np
import pandas as pd
from geomeppy.geom.polygons import Polygon3D
from path import Path
from tqdm import tqdm
import archetypal.settings as settings
from archetypal import IDF
from archetypal.idfclass.util import hash_model
from archetypal.reportdata import ReportData
from archetypal.schedule import Schedule
from archetypal.utils import angle, check_unique_name, checkStr, log, recursive_len
def convert_idf_to_trnbuild(
idf_file,
weather_file,
window_lib=None,
return_idf=False,
return_b18=True,
return_t3d=False,
return_dck=False,
output_folder=None,
trnsidf_exe=None,
template=None,
log_clear_names=False,
schedule_as_input=True,
**kwargs
):
"""Convert regular IDF file (EnergyPlus) to TRNBuild file (TRNSYS)
    There are four optional outputs:
* the path to the modified IDF with the new names, coordinates, etc. of
the IDF objects. It is an input file for EnergyPlus (.idf)
* the path to the TRNBuild file (.b18)
* the path to the TRNBuild input file (.idf)
* the path to the TRNSYS dck file (.dck)
Example:
        >>> # Example of setting kwargs to be unwrapped in the function
>>> kwargs_dict = {'u_value': 2.5, 'shgc': 0.6, 't_vis': 0.78,
>>> 'tolerance': 0.05, "fframe": 0.0, "uframe": 0.5, 'ordered': True}
        >>> # Example of how to call the function
>>> idf_file = "/file.idf"
>>> window_filepath = "/W74-lib.dat"
>>> convert_idf_to_trnbuild(idf_file=idf_file, weather_file=weather_file,
>>> window_lib=window_filepath,
>>> **kwargs_dict)
Args:
idf_file (str): path to the idf file to convert
weather_file (str): To run EnergyPlus simulation and be able to get some
values (e.g. internal gain, infiltration, etc.)
window_lib (str): File path of the window library (from Berkeley Lab)
return_idf (bool, optional): If True, also return the path to the
modified IDF with the new names, coordinates, etc. of the IDF
objects. It is an input file for EnergyPlus (.idf)
return_b18 (bool, optional): If True, also return the path to the
TRNBuild file (.b18).
        return_t3d (bool, optional): If True, also return the path to the
            TRNBuild input file (.idf).
return_dck (bool, optional): If True, also return the path to the TRNSYS
dck file (.dck).
        output_folder (str, optional): location where the output files will be
            saved.
trnsidf_exe (str): Path to *trnsidf.exe*.
template (str): Path to d18 template file.
log_clear_names (bool): If True, DOES NOT log the equivalence between
the old and new names in the console.
schedule_as_input (bool): If True, writes the schedules as INPUTS in the
BUI file. Then, the user would have to link in TRNSYS studio the csv
file with the schedules to those INPUTS. If False, the schedules are
written as SCHEDULES in the BUI file. Be aware that this last option
            (False) can make TRNBuild crash because the schedules are too long
            or there are too many of them.
kwargs: keyword arguments sent to :func:`convert_idf_to_trnbuild()` or
:func:`trnbuild_idf()` or :func:`choose_window`. "ordered=True" to
have the name of idf objects in the outputfile in ascendant order.
See :func:`trnbuild_idf` or :func:`choose_window()` for other
parameter definition
Returns:
(tuple): A tuple containing:
* return_b18 (str): the path to the TRNBuild file (.b18). Only
provided if *return_b18* is True.
* return_trn (str): the path to the TRNBuild input file (.idf). Only
provided if *return_t3d* is True.
            * return_dck (str): the path to the TRNSYS dck file (.dck). Only
provided if *return_dck* is True.
"""
# Assert all path needed exist
(
idf_file,
weather_file,
window_lib,
output_folder,
trnsidf_exe,
template,
) = _assert_files(
idf_file, weather_file, window_lib, output_folder, trnsidf_exe, template
)
# Run EnergyPlus Simulation
ep_version = kwargs.pop("as_version", None)
outputs = [
{
"key": "Output:Variable".upper(),
**dict(
Variable_Name="Zone Thermostat Heating Setpoint Temperature",
Reporting_Frequency="hourly",
),
},
{
"key": "Output:Variable".upper(),
**dict(
Variable_Name="Zone Thermostat Cooling Setpoint Temperature",
Reporting_Frequency="hourly",
),
},
]
idf = IDF(
idf_file,
epw=weather_file,
as_version=ep_version,
annual=True,
design_day=False,
prep_outputs=outputs,
)
# Check if cache exists
# idf = _load_idf_file_and_clean_names(idf_file, log_clear_names)
    # Output reports
htm = idf.htm()
sql_file = idf.sql_file
# Clean names of idf objects (e.g. 'MATERIAL')
log("Cleaning names of the IDF objects...", lg.INFO)
start_time = time.time()
clear_name_idf_objects(idf, log_clear_names)
log(
"Cleaned IDF object names in {:,.2f} seconds".format(time.time() - start_time),
lg.INFO,
)
# Get old:new names equivalence
old_new_names = pd.read_csv(
os.path.join(
settings.data_folder,
Path(idf_file).basename().stripext() + "_old_new_names_equivalence.csv",
)
).to_dict()
# Read IDF_T3D template and write lines in variable
lines = io.TextIOWrapper(io.BytesIO(settings.template_BUI)).readlines()
# Get objects from IDF file
(
buildingSurfs,
buildings,
constructions,
equipments,
fenestrationSurfs,
globGeomRules,
lights,
locations,
materialAirGap,
materialNoMass,
materials,
peoples,
versions,
zones,
zonelists,
) = get_idf_objects(idf)
# Get all construction EXCEPT fenestration ones
constr_list = _get_constr_list(buildingSurfs)
# If ordered=True, ordering idf objects
ordered = kwargs.get("ordered", False)
(
buildingSurfs,
buildings,
constr_list,
constructions,
equipments,
fenestrationSurfs,
globGeomRules,
lights,
locations,
materialAirGap,
materialNoMass,
materials,
peoples,
zones,
zonelists,
) = _order_objects(
buildingSurfs,
buildings,
constr_list,
constructions,
equipments,
fenestrationSurfs,
globGeomRules,
lights,
locations,
materialAirGap,
materialNoMass,
materials,
peoples,
zones,
zonelists,
ordered,
)
# region Get schedules from IDF
schedule_names, schedules = _get_schedules(idf)
# Adds ground temperature to schedules
adds_sch_ground(htm, schedule_names, schedules)
# Adds "sch_setpoint_ZONES" to schedules
df_heating_setpoint = ReportData.from_sqlite(
sql_file, table_name="Zone Thermostat Heating Setpoint Temperature"
)
df_cooling_setpoint = ReportData.from_sqlite(
sql_file, table_name="Zone Thermostat Cooling Setpoint Temperature"
)
# Heating
adds_sch_setpoint(
zones, df_heating_setpoint, old_new_names, schedule_names, schedules, "h"
)
# Cooling
adds_sch_setpoint(
zones, df_cooling_setpoint, old_new_names, schedule_names, schedules, "c"
)
# Save schedules to csv file
_yearlySched_to_csv(idf_file, output_folder, schedule_names, schedules)
# endregion
# Gets and removes from IDF materials with resistance lower than 0.0007
mat_name = _remove_low_conductivity(constructions, idf, materials)
# Write data from IDF file to T3D file
start_time = time.time()
# Write VERSION from IDF to lines (T3D)
_write_version(lines, versions)
# Write BUILDING from IDF to lines (T3D)
_write_building(buildings, lines)
# Write LOCATION and GLOBALGEOMETRYRULES from IDF to lines (T3D) and
# define if coordinate system is "Relative"
coordSys = _write_location_geomrules(globGeomRules, lines, locations)
# Determine if coordsSystem is "World" (all zones at (0,0,0))
coordSys = _is_coordSys_world(coordSys, zones)
# Change coordinates from relative to absolute for building surfaces
_change_relative_coords(buildingSurfs, coordSys, idf)
# Adds or changes adjacent surface if needed
_add_change_adj_surf(buildingSurfs, idf)
buildingSurfs = idf.idfobjects["BUILDINGSURFACE:DETAILED"]
# region Write VARIABLEDICTONARY (Zone, BuildingSurf, FenestrationSurf)
# from IDF to lines (T3D)
# Get all surfaces having Outside boundary condition with the ground.
# To be used to find the window's slopes
n_ground = _get_ground_vertex(buildingSurfs)
# Writing zones in lines
win_slope_dict = _write_zone_buildingSurf_fenestrationSurf(
buildingSurfs,
coordSys,
fenestrationSurfs,
idf,
lines,
n_ground,
zones,
schedule_as_input,
)
# endregion
# region Write CONSTRUCTION from IDF to lines (T3D)
_write_constructions(constr_list, idf, lines, mat_name, materials)
# endregion
# Write CONSTRUCTION from IDF to lines, at the end of the T3D file
_write_constructions_end(constr_list, idf, lines)
# region Write LAYER from IDF to lines (T3D)
_write_materials(lines, materialAirGap, materialNoMass, materials)
# endregion
# region Write GAINS (People, Lights, Equipment) from IDF to lines (T3D)
_write_gains(equipments, lights, lines, peoples, htm, old_new_names)
# endregion
# region Write basic conditioning systems (HEATING and COOLING) from IDF to lines (T3D)
heat_dict, cool_dict = _write_conditioning(
htm, lines, schedules, old_new_names, schedule_as_input
)
# endregion
# region Write SCHEDULES from IDF to lines (T3D)
schedules_not_written = _write_schedules(
lines, schedule_names, schedules, schedule_as_input, idf_file
)
# endregion
# region Write WINDOWS chosen by the user (from Berkeley lab library) in
# lines (T3D)
# Get window from library
# window = (win_id, description, design, u_win, shgc_win, t_sol_win, rf_sol,
# t_vis_win, lay_win, width, window_bunches[win_id],
# and maybe tolerance)
log("Get windows info from window library...")
win_u_value = kwargs.get("u_value", 2.2)
win_shgc = kwargs.get("shgc", 0.64)
win_tvis = kwargs.get("t_vis", 0.8)
win_tolerance = kwargs.get("tolerance", 0.05)
win_fframe = kwargs.get("fframe", 0.15)
win_uframe = kwargs.get("uframe", 8.17)
window = choose_window(win_u_value, win_shgc, win_tvis, win_tolerance, window_lib)
# Write windows in lines
_write_window(lines, win_slope_dict, window, win_fframe, win_uframe)
# Write window pool in lines
_write_winPool(lines, window)
# endregion
# Save T3D file at output_folder
output_folder, t3d_path = _save_t3d(idf_file, lines, output_folder)
log(
"Write data from IDF to T3D in {:,.2f} seconds".format(
time.time() - start_time
),
lg.INFO,
)
# If asked by the user, save IDF file with modification done on the names,
# coordinates, etc. at
# output_folder
new_idf_path = os.path.join(output_folder, "MODIFIED_" + os.path.basename(idf_file))
if return_idf:
idf.saveas(filename=new_idf_path)
# Run trnsidf to convert T3D to BUI
log("Converting t3d file to bui file. Running trnsidf.exe...")
dck = return_dck
nonum = kwargs.pop("nonum", False)
N = kwargs.pop("N", False)
geo_floor = kwargs.pop("geo_floor", 0.6)
refarea = kwargs.pop("refarea", False)
volume = kwargs.pop("volume", False)
capacitance = kwargs.pop("capacitance", False)
trnbuild_idf(
t3d_path,
output_folder=output_folder,
template=template,
dck=dck,
nonum=nonum,
N=N,
geo_floor=geo_floor,
refarea=refarea,
volume=volume,
capacitance=capacitance,
trnsidf_exe=trnsidf_exe,
)
# Prepare return arguments
pre, ext = os.path.splitext(t3d_path)
b18_path = pre + ".b18"
dck_path = pre + ".dck"
from itertools import compress
return_path = tuple(
compress(
[new_idf_path, b18_path, t3d_path, dck_path],
[return_idf, return_b18, return_t3d, return_dck],
)
)
# region Modify B18 file
with open(b18_path) as b18_file:
b18_lines = b18_file.readlines()
# Adds conditionning to B18 file
conditioning_to_b18(b18_lines, heat_dict, cool_dict, zones, old_new_names)
# Adds infiltration to b18 file
infilt_to_b18(b18_lines, zones, htm)
# Adds internal gain to b18 file
gains_to_b18(
b18_lines,
zones,
zonelists,
peoples,
lights,
equipments,
schedules_not_written,
htm,
old_new_names,
schedule_as_input,
)
# T initial to b18
t_initial_to_b18(b18_lines, zones, schedules)
# Save B18 file at output_folder
if output_folder is None:
# User did not provide an output folder path. We use the default setting
output_folder = os.path.relpath(settings.data_folder)
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
with open(b18_path, "w") as converted_file:
for line in b18_lines:
converted_file.writelines(str(line))
# endregion
return return_path
def t_initial_to_b18(b18_lines, zones, schedules):
"""
Args:
b18_lines:
zones:
schedules:
"""
for zone in zones:
t_ini = schedules["sch_h_setpoint_" + zone.Name]["all values"][0]
# Get line number where to write TINITIAL
f_count = checkStr(b18_lines, "Z o n e " + zone.Name)
tIniNum = checkStr(b18_lines, "TINITIAL", f_count)
ind_tini = b18_lines[tIniNum - 1].find("TINITIAL")
ind_phini = b18_lines[tIniNum - 1].find("PHINITIAL")
b18_lines[tIniNum - 1] = (
b18_lines[tIniNum - 1][: ind_tini + len("TINITIAL=")]
+ " "
+ str(t_ini)
+ " : "
+ b18_lines[tIniNum - 1][ind_phini:]
+ "\n"
)
def adds_sch_setpoint(
zones, report_sqlite, old_new_names, schedule_names, schedules, string, **kwargs
):
"""
Args:
zones:
report_sqlite:
old_new_names:
schedule_names:
schedules:
string:
**kwargs:
"""
if string == "h":
description = "Getting heating setpoints"
if string == "c":
description = "Getting cooling setpoints"
for zone in tqdm(zones, desc=description, **kwargs):
all_values = report_sqlite[
report_sqlite.loc[:, "KeyValue"]
== old_new_names[zone.Name.upper()][0].upper()
].Value.values
schedule_name = "sch_" + string + "_setpoint_" + zone.Name
schedule_names.append(schedule_name)
schedules[schedule_name] = {"all values": all_values}
def adds_sch_ground(htm, schedule_names, schedules):
# Get the monthly values from htm output file from EP simulation
"""
Args:
htm:
schedule_names:
schedules:
"""
values = np.append(
htm["Site:GroundTemperature:BuildingSurface"].values[0][1:],
htm["Site:GroundTemperature:BuildingSurface"].values[0][-1],
)
# Create array of 8760 values from monthly values
all_values = (
pd.DataFrame(
values, index=pd.date_range(freq="MS", start="01/01/2019", periods=13)
)
.resample("H")
.ffill()[:-1]
.T.values[0]
)
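    # Sketch of the resampling above (values are whatever the htm report contains):
    # 12 monthly ground temperatures plus a repeated December value are indexed at
    # 13 month starts, resample("H").ffill() expands them to hourly resolution, and
    # the trailing [:-1] drops the extra hour at the following January 1st, leaving
    # the 8760 hourly values used for the "sch_ground" schedule.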
schedule_names.append("sch_ground")
# Adds "sch_ground" to schedules dict
schedules["sch_ground"] = {"all values": all_values}
def infilt_to_b18(b18_lines, zones, htm, **kwargs):
"""
Args:
b18_lines:
zones:
htm:
**kwargs:
"""
try:
mean_infilt = round(
np.average(
htm["ZoneInfiltration Airflow Stats Nominal"][
"ACH - Air Changes per Hour"
].values,
weights=htm["ZoneInfiltration Airflow Stats Nominal"][
"Zone Floor Area {m2}"
].values,
),
3,
)
except KeyError:
mean_infilt = 0
log("Writing infiltration info from idf file to b18 file...")
# Get line number where to write
infiltNum = checkStr(b18_lines, "I n f i l t r a t i o n")
# Write in infiltration section
b18_lines.insert(infiltNum + 1, "INFILTRATION Constant" + "\n")
b18_lines.insert(infiltNum + 2, "AIRCHANGE=" + str(mean_infilt) + "\n")
# Write in zone section
for zone in tqdm(zones, desc="Writing infiltration in BUI", **kwargs):
f_count = checkStr(b18_lines, "Z o n e " + zone.Name)
regimeInfiltNum = checkStr(b18_lines, "REGIME", f_count)
b18_lines.insert(regimeInfiltNum, " INFILTRATION = Constant" + "\n")
def gains_to_b18(
b18_lines,
zones,
zonelists,
peoples,
lights,
equipments,
schedules_not_written,
htm,
old_new_names,
schedule_as_input,
**kwargs
):
"""
Args:
b18_lines:
zones:
zonelists:
peoples:
lights:
equipments:
schedules_not_written:
htm:
old_new_names:
schedule_as_input:
**kwargs:
"""
peoples_in_zone = zone_where_gain_is(peoples, zones, zonelists)
lights_in_zone = zone_where_gain_is(lights, zones, zonelists)
equipments_in_zone = zone_where_gain_is(equipments, zones, zonelists)
for zone in tqdm(zones, desc="Writing internal gains in BUI", **kwargs):
# Write people gains
_write_gain_to_b18(
b18_lines,
zone,
peoples,
peoples_in_zone,
schedules_not_written,
htm,
old_new_names,
"People",
schedule_as_input,
)
# Write light gains
_write_gain_to_b18(
b18_lines,
zone,
lights,
lights_in_zone,
schedules_not_written,
htm,
old_new_names,
"Lights",
schedule_as_input,
)
# Write equipment gains
_write_gain_to_b18(
b18_lines,
zone,
equipments,
equipments_in_zone,
schedules_not_written,
htm,
old_new_names,
"ElectricEquipment",
schedule_as_input,
)
def _write_gain_to_b18(
b18_lines,
zone,
gains,
gains_in_zone,
schedules_not_written,
htm,
old_new_names,
string,
schedule_as_input,
):
"""
Args:
b18_lines:
zone:
gains:
gains_in_zone:
schedules_not_written:
htm:
old_new_names:
string:
schedule_as_input:
"""
for gain in gains:
if zone.Name in gains_in_zone[gain.Name]:
f_count = checkStr(b18_lines, "Z o n e " + zone.Name)
regimeNum = checkStr(b18_lines, "REGIME", f_count)
schedule = htm[string + " Internal Gains Nominal"][
htm[string + " Internal Gains Nominal"]["Name"].str.contains(
old_new_names[gain.Name.upper()][0]
)
]["Schedule Name"].values[0]
schedule = [
key for (key, value) in old_new_names.items() if value[0] == schedule
][0].lower()
if schedule in schedules_not_written:
continue
# Write
if schedule_as_input:
b18_lines.insert(
regimeNum,
" GAIN= "
+ gain.Name
+ " : SCALE= INPUT 1*"
+ schedule
+ " : GEOPOS=0 : SCALE2= 1 : FRAC_REFAREA= 1"
+ "\n",
)
else:
b18_lines.insert(
regimeNum,
" GAIN= "
+ gain.Name
+ " : SCALE= SCHEDULE 1*"
+ schedule
+ " : GEOPOS=0 : SCALE2= 1 : FRAC_REFAREA= 1"
+ "\n",
)
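    # A line written by this helper would look roughly like
    #   GAIN= p_000001 : SCALE= SCHEDULE 1*sc_000004 : GEOPOS=0 : SCALE2= 1 : FRAC_REFAREA= 1
    # (the gain and schedule names here are hypothetical renamed identifiers). With
    # schedule_as_input=True the SCALE keyword becomes "INPUT" instead, so the schedule
    # can be wired to an external input in TRNSYS Studio.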
def conditioning_to_b18(
b18_lines, heat_dict, cool_dict, zones, old_new_names, **kwargs
):
"""
Args:
b18_lines:
heat_dict:
cool_dict:
zones:
old_new_names:
**kwargs:
"""
for zone in tqdm(zones, desc="Writing conditioning in BUI", **kwargs):
# Heating
_write_heat_cool_to_b18(heat_dict, old_new_names, zone, b18_lines, " HEATING")
# Cooling
_write_heat_cool_to_b18(cool_dict, old_new_names, zone, b18_lines, " COOLING")
def _write_heat_cool_to_b18(list_dict, old_new_names, zone, b18_lines, string):
"""
Args:
list_dict:
old_new_names:
zone:
b18_lines:
string:
"""
for key in list_dict.keys():
if old_new_names[zone.Name.upper()][0] in key:
f_count = checkStr(b18_lines, "Z o n e " + zone.Name)
regimeNum = checkStr(b18_lines, "REGIME", f_count)
# Write
if not isinstance(list_dict[key], list):
value = list_dict[key]
else:
value = list_dict[key][0]
b18_lines.insert(regimeNum, string + " = " + value + "\n")
def zone_where_gain_is(gains, zones, zonelists):
"""
Args:
gains:
zones:
zonelists:
"""
gain_in_zone = {}
for gain in gains:
list_zone = []
for zone in zones:
if zone.Name == gain.Zone_or_ZoneList_Name:
list_zone.append([zone.Name])
for zonelist in zonelists:
if zonelist.Name == gain.Zone_or_ZoneList_Name:
list_zone.append(zonelist.fieldvalues[2:])
flat_list = [item for sublist in list_zone for item in sublist]
gain_in_zone[gain.Name] = flat_list
return gain_in_zone
def _change_relative_coords(buildingSurfs, coordSys, idf):
"""
Args:
buildingSurfs:
coordSys:
idf:
"""
if coordSys == "Relative":
# Add zone coordinates to X, Y, Z vectors
for buildingSurf in buildingSurfs:
surf_zone = buildingSurf.Zone_Name
incrX, incrY, incrZ = zone_origin(idf.getobject("ZONE", surf_zone))
_relative_to_absolute(buildingSurf, incrX, incrY, incrZ)
def _yearlySched_to_csv(idf_file, output_folder, schedule_names, schedules, **kwargs):
"""
Args:
idf_file:
output_folder:
schedule_names:
schedules:
**kwargs:
"""
log("Saving yearly schedules in CSV file...")
idf_file = Path(idf_file)
df_sched = pd.DataFrame()
schedule_names.sort()
for schedule_name in tqdm(
schedule_names, desc="Writing schedules in csv", **kwargs
):
df_sched[schedule_name] = schedules[schedule_name]["all values"]
sched_file_name = "yearly_schedules_" + idf_file.basename().stripext() + ".csv"
output_folder = Path(output_folder)
if not output_folder.exists():
output_folder.mkdir_p()
df_sched.to_csv(path_or_buf=os.path.join(output_folder, sched_file_name))
def _get_constr_list(buildingSurfs):
"""
Args:
buildingSurfs:
"""
constr_list = []
for buildingSurf in buildingSurfs:
constr_list.append(buildingSurf.Construction_Name)
constr_list = list(set(constr_list))
constr_list.sort()
return constr_list
def _save_t3d(idf_file, lines, output_folder):
"""Saves T3D file
Args:
idf_file (str): path to the idf file to convert
lines (list): lines to copy in the T3D file
output_folder (str): path to the output folder (can be None)
Returns:
        output_folder (str): path to the output folder
        t3d_path (str): path to the T3D file
"""
if output_folder is None:
# User did not provide an output folder path. We use the default setting
output_folder = os.path.relpath(settings.data_folder)
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
t3d_path = os.path.join(output_folder, "T3D_" + os.path.basename(idf_file))
with open(t3d_path, "w") as converted_file:
for line in lines:
converted_file.writelines(str(line))
return output_folder, t3d_path
def _remove_low_conductivity(constructions, idf, materials):
"""Removes materials form idf with conductivity too low (0.0007 kJ/h-m-K)
Args:
constructions (Idf_MSequence): CONSTRUCTION object from the IDF
idf (archetypal.idfclass.idf.IDF object at 0x11e3d3208): the IDf object
materials (Idf_MSequence): MATERIAL object from the IDF
Returns:
mat_name (list): list of name of the removed materials
"""
material_low_res = []
for material in materials:
if material.Thickness / (material.Conductivity * 3.6) < 0.0007:
material_low_res.append(material)
# Remove materials with resistance lower than 0.0007 from IDF
mat_name = []
for mat in material_low_res:
mat_name.append(mat.Name)
idf.removeidfobject(mat)
# Get constructions with only materials with resistance lower than 0.0007
construct_low_res = []
for i in range(0, len(constructions)):
if (
len(constructions[i].fieldvalues) == 3
and constructions[i].fieldvalues[2] in mat_name
):
construct_low_res.append(constructions[i])
# Remove constructions with only materials with resistance lower than
# 0.0007 from IDF
for construct in construct_low_res:
idf.removeidfobject(construct)
return mat_name
def _order_objects(
buildingSurfs,
buildings,
constr_list,
constructions,
equipments,
fenestrationSurfs,
globGeomRules,
lights,
locations,
materialAirGap,
materialNoMass,
materials,
peoples,
zones,
zonelists,
ordered=True,
):
"""
Args:
buildingSurfs (Idf_MSequence): BUILDINGSURFACE:DETAILED object from the
IDF
buildings (Idf_MSequence): BUILDING object from the IDF
constr_list:
constructions (Idf_MSequence): CONSTRUCTION object from the IDF
equipments (Idf_MSequence): EQUIPMENT object from the IDF
fenestrationSurfs (Idf_MSequence): FENESTRATIONSURFACE:DETAILED object
from the IDF
globGeomRules (Idf_MSequence): GLOBALGEOMETRYRULES object from the IDF
lights (Idf_MSequence): LIGHTs object from the IDF
locations (Idf_MSequence): SITE:LOCATION object from the IDF
materialAirGap (Idf_MSequence): MATERIAL:AIRGAP object from the IDF
materialNoMass (Idf_MSequence): MATERIAL:NOMASS object from the IDF
materials (Idf_MSequence): MATERIAL object from the IDF
peoples (Idf_MSequence): PEOPLE object from the IDF
zones (Idf_MSequence): ZONE object from the IDF
zonelists:
ordered:
Returns:
IDF objects (see Args) with their order reversed
"""
if ordered:
materials = list(reversed(materials))
materialNoMass = list(reversed(materialNoMass))
materialAirGap = list(reversed(materialAirGap))
buildings = list(reversed(buildings))
locations = list(reversed(locations))
globGeomRules = list(reversed(globGeomRules))
constructions = list(reversed(constructions))
fenestrationSurfs = list(reversed(fenestrationSurfs))
buildingSurfs = list(reversed(buildingSurfs))
zones = list(reversed(zones))
zonelists = list(reversed(zonelists))
peoples = list(reversed(peoples))
lights = list(reversed(lights))
equipments = list(reversed(equipments))
constr_list = list(reversed(constr_list))
return (
buildingSurfs,
buildings,
constr_list,
constructions,
equipments,
fenestrationSurfs,
globGeomRules,
lights,
locations,
materialAirGap,
materialNoMass,
materials,
peoples,
zones,
zonelists,
)
def get_idf_objects(idf):
"""Gets idf objects
Args:
idf (archetypal.idfclass.idf.IDF object at 0x11e3d3208): the IDf object
Returns:
        buildingSurfs (Idf_MSequence): BUILDINGSURFACE:DETAILED objects from the IDF
        buildings (Idf_MSequence): BUILDING objects from the IDF
        constructions (Idf_MSequence): CONSTRUCTION objects from the IDF
        equipments (Idf_MSequence): ELECTRICEQUIPMENT objects from the IDF
        fenestrationSurfs (Idf_MSequence): FENESTRATIONSURFACE:DETAILED objects from the IDF
        globGeomRules (Idf_MSequence): GLOBALGEOMETRYRULES objects from the IDF
        lights (Idf_MSequence): LIGHTS objects from the IDF
        locations (Idf_MSequence): SITE:LOCATION objects from the IDF
        materialAirGap (Idf_MSequence): MATERIAL:AIRGAP objects from the IDF
        materialNoMass (Idf_MSequence): MATERIAL:NOMASS objects from the IDF
        materials (Idf_MSequence): MATERIAL objects from the IDF
        peoples (Idf_MSequence): PEOPLE objects from the IDF
        versions (Idf_MSequence): VERSION objects from the IDF
        zones (Idf_MSequence): ZONE objects from the IDF
        zonelists (Idf_MSequence): ZONELIST objects from the IDF
"""
materials = idf.idfobjects["MATERIAL"]
materialNoMass = idf.idfobjects["MATERIAL:NOMASS"]
materialAirGap = idf.idfobjects["MATERIAL:AIRGAP"]
versions = idf.idfobjects["VERSION"]
buildings = idf.idfobjects["BUILDING"]
locations = idf.idfobjects["SITE:LOCATION"]
globGeomRules = idf.idfobjects["GLOBALGEOMETRYRULES"]
constructions = idf.idfobjects["CONSTRUCTION"]
fenestrationSurfs = idf.idfobjects["FENESTRATIONSURFACE:DETAILED"]
buildingSurfs = idf.idfobjects["BUILDINGSURFACE:DETAILED"]
zones = idf.idfobjects["ZONE"]
peoples = idf.idfobjects["PEOPLE"]
lights = idf.idfobjects["LIGHTS"]
equipments = idf.idfobjects["ELECTRICEQUIPMENT"]
zonelists = idf.idfobjects["ZONELIST"]
return (
buildingSurfs,
buildings,
constructions,
equipments,
fenestrationSurfs,
globGeomRules,
lights,
locations,
materialAirGap,
materialNoMass,
materials,
peoples,
versions,
zones,
zonelists,
)
def load_idf_file_and_clean_names(idf_file, log_clear_names):
"""Load idf file from cache if cache exist and user ask for use_cache=True.
Moreover cleans idf object names and log in the console the equivalence
between the old and new names if log_clear_names=False
Args:
idf_file (str): Path to the idf file
log_clear_names (bool): If True, DOES NOT log the equivalence between
the old and new names in the console.
Returns:
idf (archetypal.idfclass.idf.IDF object at 0x11e3d3208): the IDf object
"""
log("Loading IDF file...", lg.INFO)
start_time = time.time()
cache_filename = hash_model(idf_file)
# Load IDF file(s)
idf = IDF(idf_file, prep_outputs=False)
log("IDF files loaded in {:,.2f} seconds".format(time.time() - start_time), lg.INFO)
# Clean names of idf objects (e.g. 'MATERIAL')
log("Cleaning names of the IDF objects...", lg.INFO)
start_time = time.time()
clear_name_idf_objects(idf, log_clear_names)
# save_idf_object_to_cache(idf, idf_file, cache_filename, 'pickle')
log(
"Cleaned IDF object names in {:,.2f} seconds".format(time.time() - start_time),
lg.INFO,
)
return idf
def _assert_files(
idf_file, weather_file, window_lib, output_folder, trnsidf_exe, template
):
"""Ensure the files and directory are here
Args:
idf_file (str or Path): path to the idf file to convert
weather_file:
window_lib (str or Path): File path of the window library (from Berkeley
Lab)
output_folder (str or Path): path to the output folder (can be None)
trnsidf_exe (str or Path): Path to *trnsidf.exe*.
template (str or Path): Path to d18 template file.
"""
if isinstance(idf_file, (str, Path)):
if not os.path.isfile(idf_file):
raise IOError("idf_file file not found")
else:
raise IOError("idf_file file is not a string (path)")
if isinstance(weather_file, (str, Path)):
if not os.path.isfile(weather_file):
raise IOError("weather file not found")
else:
raise IOError("weather file is not a string (path)")
if window_lib:
if isinstance(window_lib, (str, Path)):
if not os.path.isfile(window_lib):
raise IOError("window_lib file not found")
else:
raise IOError("window_lib file is not a string (path)")
if not output_folder:
output_folder = os.path.relpath(settings.data_folder)
if not os.path.exists(output_folder):
os.mkdir(output_folder)
if not template:
template = settings.path_template_d18
if not os.path.isfile(template):
raise IOError("template file not found")
if not trnsidf_exe:
trnsidf_exe = settings.trnsys_default_folder / Path(
"Building/trnsIDF/trnsidf.exe"
)
if not os.path.isfile(trnsidf_exe):
raise IOError("trnsidf.exe not found")
return idf_file, weather_file, window_lib, output_folder, trnsidf_exe, template
def _add_change_adj_surf(buildingSurfs, idf):
"""Adds or changes adjacent surfaces if needed
Args:
buildingSurfs (idf_MSequence): IDF object from idf.idfobjects().
List of
building surfaces ("BUILDINGSURFACE:DETAILED" in the IDF). Building
surfaces to iterate over and determine if either a change on an
adjacent surface is needed or the creation of a new one
idf (archetypal.IDF): IDF object
"""
adj_surfs_to_change = {}
adj_surfs_to_make = []
for buildingSurf in buildingSurfs:
if "zone" in buildingSurf.Outside_Boundary_Condition.lower():
# Get the surface EpBunch that is adjacent to the building surface
outside_bound_zone = buildingSurf.Outside_Boundary_Condition_Object
surfs_in_bound_zone = [
surf for surf in buildingSurfs if surf.Zone_Name == outside_bound_zone
]
poly_buildingSurf = Polygon3D(buildingSurf.coords)
n_buildingSurf = poly_buildingSurf.normal_vector
area_build = poly_buildingSurf.area
centroid_build = poly_buildingSurf.centroid
# Check if buildingSurf has an adjacent surface
for surf in surfs_in_bound_zone:
if surf.Outside_Boundary_Condition.lower() == "outdoors":
poly_surf_bound = Polygon3D(surf.coords)
n_surf_bound = poly_surf_bound.normal_vector
area_bound = poly_surf_bound.area
centroid_bound = poly_surf_bound.centroid
                    # Check if the boundary surface already exists: sum of normal
# vectors must be equal to 0 AND surfaces must have the
# same centroid AND surfaces must have the same area
if (
round(n_surf_bound.x + n_buildingSurf.x, 3) == 0
and round(n_surf_bound.y + n_buildingSurf.y, 3) == 0
and round(n_surf_bound.z + n_buildingSurf.z, 3) == 0
and round(centroid_bound.x, 3) == round(centroid_build.x, 3)
and round(centroid_bound.y, 3) == round(centroid_build.y, 3)
and round(centroid_bound.z, 3) == round(centroid_build.z, 3)
and round(area_bound, 3) == round(area_build, 3)
):
# If boundary surface exists, append the list of surface
# to change
if not surf.Name in adj_surfs_to_change:
adj_surfs_to_change[buildingSurf.Name] = surf.Name
break
# If boundary surface does not exist, append the list of surface
# to create
if not buildingSurf.Name in adj_surfs_to_change:
if not buildingSurf.Name in adj_surfs_to_make:
adj_surfs_to_make.append(buildingSurf.Name)
# If adjacent surface found, check if Outside boundary
# condition is a Zone and not "Outdoors"
for key, value in adj_surfs_to_change.items():
idf.getobject(
"BUILDINGSURFACE:DETAILED", value
).Outside_Boundary_Condition = "Zone"
idf.getobject(
"BUILDINGSURFACE:DETAILED", value
).Outside_Boundary_Condition_Object = idf.getobject(
"BUILDINGSURFACE:DETAILED", key
).Zone_Name
idf.getobject(
"BUILDINGSURFACE:DETAILED", value
).Construction_Name = idf.getobject(
"BUILDINGSURFACE:DETAILED", key
).Construction_Name
# If did not find any adjacent surface
for adj_surf_to_make in adj_surfs_to_make:
buildSurf = idf.getobject("BUILDINGSURFACE:DETAILED", adj_surf_to_make)
surf_type = buildSurf.Surface_Type
if surf_type.lower() == "wall":
surf_type_bound = "Wall"
if surf_type.lower() == "floor":
surf_type_bound = "Ceiling"
if surf_type.lower() == "ceiling":
surf_type_bound = "Floor"
if surf_type.lower() == "roof":
surf_type_bound = "Floor"
# Create a new surface
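        # The new surface reuses the original vertices in reverse order
        # (Vertex_1 <- Vertex_4, ..., Vertex_4 <- Vertex_1), which flips the
        # outward normal relative to the original surface so the two sides of
        # the shared boundary face opposite directions.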
idf.newidfobject(
"BUILDINGSURFACE:DETAILED",
Name=buildSurf.Name + "_adj",
Surface_Type=surf_type_bound,
Construction_Name=buildSurf.Construction_Name,
Zone_Name=buildSurf.Outside_Boundary_Condition_Object,
Outside_Boundary_Condition="Zone",
Outside_Boundary_Condition_Object=buildSurf.Zone_Name,
Sun_Exposure="NoSun",
Wind_Exposure="NoWind",
View_Factor_to_Ground="autocalculate",
Number_of_Vertices=buildSurf.Number_of_Vertices,
Vertex_1_Xcoordinate=buildSurf.Vertex_4_Xcoordinate,
Vertex_1_Ycoordinate=buildSurf.Vertex_4_Ycoordinate,
Vertex_1_Zcoordinate=buildSurf.Vertex_4_Zcoordinate,
Vertex_2_Xcoordinate=buildSurf.Vertex_3_Xcoordinate,
Vertex_2_Ycoordinate=buildSurf.Vertex_3_Ycoordinate,
Vertex_2_Zcoordinate=buildSurf.Vertex_3_Zcoordinate,
Vertex_3_Xcoordinate=buildSurf.Vertex_2_Xcoordinate,
Vertex_3_Ycoordinate=buildSurf.Vertex_2_Ycoordinate,
Vertex_3_Zcoordinate=buildSurf.Vertex_2_Zcoordinate,
Vertex_4_Xcoordinate=buildSurf.Vertex_1_Xcoordinate,
Vertex_4_Ycoordinate=buildSurf.Vertex_1_Ycoordinate,
Vertex_4_Zcoordinate=buildSurf.Vertex_1_Zcoordinate,
)
def _get_schedules(idf, **kwargs):
"""Get schedules from IDF
Args:
idf (archetypal.IDF): IDF object
**kwargs:
"""
start_time = time.time()
log("Reading schedules from the IDF file...")
schedule_names = []
used_schedules = idf.get_used_schedules(yearly_only=True)
schedules = {}
for schedule_name in tqdm(used_schedules, desc="Getting schedules", **kwargs):
s = Schedule(
schedule_name, idf, start_day_of_the_week=idf.day_of_week_for_start_day
)
schedule_names.append(schedule_name)
schedules[schedule_name] = {}
year, weeks, days = s.to_year_week_day()
schedules[schedule_name]["all values"] = s.all_values
schedules[schedule_name]["year"] = year
log(
"Got yearly, weekly and daily schedules in {:,.2f} seconds".format(
time.time() - start_time
),
lg.INFO,
)
return schedule_names, schedules
def clear_name_idf_objects(idfFile, log_clear_names=False, **kwargs):
"""Clean names of IDF objects.
Replaces variable names with a unique name, easy to refer to the original
    object. For example: if the object is the n-th "Schedule Type Limit", then
    the new name will be "stl_00000n". Names are limited to 10 characters.
Args:
idfFile (archetypal.IDF): IDF object where to clean names
log_clear_names:
**kwargs:
"""
uniqueList = []
old_name_list = []
old_new_eq = {}
# For all categories of objects in the IDF file
for obj in tqdm(idfFile.idfobjects, desc="Cleaning names", **kwargs):
epObjects = idfFile.idfobjects[obj]
# For all objects in Category
count_name = 0
for epObject in epObjects:
# Do not take fenestration, to be treated later
try:
fenestration = [
s
for s in ["fenestration", "shgc", "window", "glazing"]
if s in epObject.Name.lower() or s in epObject.key.lower()
]
except:
fenestration = []
if not fenestration:
try:
old_name = epObject.Name
                    # For TRNBuild compatibility the new name must begin with a
                    # lowercase letter and be at most 10 characters long. It is
                    # built from the lowercased initials of the epObject type
                    # plus an increment counting objects of that type, and is
                    # checked for uniqueness.
list_word_epObject_type = re.sub(
r"([A-Z])", r" \1", epObject.fieldvalues[0]
).split()
# Making sure new name will be max 10 characters
if len(list_word_epObject_type) > 4:
list_word_epObject_type = list_word_epObject_type[:4]
first_letters = "".join(
word[0].lower() for word in list_word_epObject_type
)
end_count = "%06d" % count_name
new_name = first_letters + "_" + end_count
# Make sure new name does not already exist
new_name, count_name = check_unique_name(
first_letters, count_name, new_name, uniqueList
)
uniqueList.append(new_name)
old_name_list.append(old_name)
old_new_eq[new_name.upper()] = old_name.upper()
# Changing the name in the IDF object
idfFile.rename(obj, old_name, new_name)
except:
pass
else:
continue
# Save equivalence between old and new names
    df = pd.DataFrame([old_new_eq])
import os
from typing import TypeVar
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy import stats
try:
from sentence_transformers import SentenceTransformer
import umap
import hdbscan
import torch
except ModuleNotFoundError as e:
print('Please install the dependencies for the visualization routines, using `pip install semanticlayertools[embeddml]`.')
raise e
smoothing = TypeVar('smoothing', bool, float)
def gaussian_smooth(x, y, grid, sd):
weights = np.transpose([stats.norm.pdf(grid, m, sd) for m in x])
weights = weights / weights.sum(0)
return (weights * y).sum(1)
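# A rough usage sketch for gaussian_smooth (hypothetical data, not part of the
# public API): spread yearly cluster counts onto a finer grid before stacking.
#
#   years = np.array([2000, 2001, 2002, 2003])
#   counts = np.array([5.0, 9.0, 4.0, 7.0])
#   grid = np.linspace(2000, 2003, 200)
#   smoothed = gaussian_smooth(years, counts, grid, sd=0.5)
#
# Each yearly count is spread over the grid with a Gaussian kernel of width sd and
# the per-year contributions are summed; presumably this is what streamgraph() below
# uses when `smooth` is enabled.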
def streamgraph(
filepath: str, smooth: smoothing = False,
minClusterSize: int = 1000, showNthGrid: int = 5
):
"""Plot streamgraph of cluster sizes vs years.
Based on https://www.python-graph-gallery.com/streamchart-basic-matplotlib
"""
    basedf = pd.read_csv(filepath)
#coding=utf-8
import os
import CSZLData
import CSZLFeatureEngineering as FE
import CSZLModel
import CSZLDisplay
import CSZLUtils
import pandas as pd
import datetime
import time
class CSZLWorkflow(object):
"""各种workflow 主要就是back testing"""
def BackTesting(self):
#Default_folder_path='./temp/'
Default_folder_path='D:/temp2/'
#zzzz=CSZLData.CSZLData("20220101","20220301")
#zzzz.getDataSet_all(Default_folder_path)
#"20150801","20220425"
        dayA='20130101'  # normal/small
dayB='20170301'
#dayB='20200101'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC='20170301'
dayD='20220425'
        dayA='20150801'  # normal/small
dayB='20220425'
dayC='20220201'
dayD='20220513'
#dayA='20190101'#nomal/small
#dayB='20190601'
#dayC='20210101'
#dayD='20220425'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE05()
#zzzz=FE.CSZLFeatureEngineering("20170301","20220301",Default_folder_path)
#testpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE05()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220408",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20190301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20210101",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220401",Default_folder_path)
#testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(testpath,cur_model_path)
#cur_model_path2=cur_model.LGBmodelretrain(trainpath,resultpath2)
#resultpath3=cur_model.LGBmodelrepredict(testpath,resultpath,cur_model_path2)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
#lastday=today_df['trade_date'].max()
#today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
#copy_df=today_df[today_df['trade_date']==lastday]
#copy_df.to_csv("Today_NEXT_predict.csv")
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting_static_0501(self):
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
        dayA='20150801'#normal/small
dayB='20220425'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC=Day_start
dayD=Day_now
#dayD='20220506'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
cur_model.LGBmodelpredict(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting_static_0515(self):
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
        dayA='20150801'#normal/small
dayB='20220425'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC=Day_start
dayD=Day_now
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE05()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE05()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
cur_model.LGBmodelpredict(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting2(self):
#Default_folder_path='./temp/'
Default_folder_path='D:/temp2/'
#zzzz=CSZLData.CSZLData("20220101","20220301")
#zzzz.getDataSet_all(Default_folder_path)
zzzz=FE.CSZLFeatureEngineering("20130101","20170301",Default_folder_path)
trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering("20170301","20220301",Default_folder_path)
testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220408",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20190301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20200301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20210101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(testpath,cur_model_path)
cur_model_path2=cur_model.LGBmodelretrain(trainpath,resultpath2)
resultpath3=cur_model.LGBmodelrepredict(testpath,resultpath,cur_model_path2)
#resultpath=cur_model.MixOutputresult(testpath,cur_model_path)
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen(resultpath3)
pass
def RealTimePredict(self):
Default_folder_path='./temp2/'
#Default_folder_path='D:/temp2/'
#cur_model_path="D:/temp2/FE0320190101to20210101_0/LGBmodeltrainLGBmodel_003"
#cur_model_path="D:/temp2/FE0320150801to20220425_0/LGBmodeltrainLGBmodel_003"
cur_model_path="./temp2/FE0520150801to20220425_0/LGBmodeltrainLGBmodel_003"
        # Whether the model needs to be regenerated (retrained)
if False:
#zzzz=FE.CSZLFeatureEngineering("20190101","20210101",Default_folder_path)
#trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering("20150801","20220425",Default_folder_path)
trainpath=zzzz.FE05()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
CSZLData.CSZLDataWithoutDate.get_realtime_quotes(Default_folder_path,Day_start,Day_end)
zzzz=FE.CSZLFeatureEngineering(Day_start,Day_end,Default_folder_path)
#zzzz=FE.CSZLFeatureEngineering("20220301","20220420",Default_folder_path)
#trainpath=zzzz.FE03()
#bbbb=pd.read_pickle(trainpath)
#aaaa=bbbb.head(10)
#aaaa=aaaa.to_csv("tttt.csv")
zzzz.FE05_real(int(Day_now))
featurepath="Today_Joinfeature.csv"
cur_model=CSZLModel.CSZLModel()
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(featurepath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(featurepath,cur_model_path,resultpath)
pass
def RealTimePredict_CB(self):
Default_folder_path='./temp2/'
#Default_folder_path='D:/temp2/'
#cur_model_path="D:/temp2/FE0320190101to20210101_0/LGBmodeltrainLGBmodel_003"
#cur_model_path="D:/temp2/FE0320150801to20220425_0/LGBmodeltrainLGBmodel_003"
cur_model_path="./temp2/FECB0320130101to20220501_0/LGBmodeltrain_CBLGBmodel_003"
        # Whether the model needs to be regenerated (retrained)
if False:
            dayA='20130101'#normal/small
dayB='20220501'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB02()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
CSZLData.CSZLDataWithoutDate.get_realtime_quotes_CB(Default_folder_path,Day_start,Day_end)
zzzz=FE.CSZLFeatureEngineering(Day_start,Day_end,Default_folder_path)
zzzz.FECB03_real(int(Day_now))
featurepath="Today_Joinfeature_CB.csv"
cur_model=CSZLModel.CSZLModel()
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict_CB(featurepath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(featurepath,cur_model_path,resultpath)
pass
def CBBackTesting(self):
Default_folder_path='D:/temp2/'
        dayA='20130101'#normal/small
dayB='20220501'
dayC='20220301'
dayD='20220505'
#dayD='20220506'
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen_CB(resultpath)
pass
def CBBackTesting_static_0508(self):
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
        dayA='20130101'#normal/small
dayB='20220301'
dayC=Day_start
dayD=Day_now
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB02()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB02()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict_CB.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen_CB(resultpath)
pass
def CBBackTesting_static_0515(self):
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
        dayA='20130101'#normal/small
dayB='20220501'
dayC=Day_start
dayD=Day_now
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict_CB.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen_CB(resultpath)
pass
def Todays_action(self,last_path,Today_result_path,changenum_max,singleamout,Auto=False):
        # Maximum number of position swaps per day
        #changenum_max=2
        ##total_amount=2000000
        # Purchase amount for a single stock
        #singleamout=1000
        # Adjust pandas display settings (width, max rows/columns)
pd.set_option('display.width', 5000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
if False:
df_stocklist=pd.read_csv(CSZLData.CSZLDataWithoutDate.get_stocklist(),index_col=0,header=0)
else:
df_stocklist=pd.read_csv("./Database/stocklist.csv",index_col=0,header=0)
df_stocklist_merge=df_stocklist[['ts_code','name']]
df_stocklist_merge['ts_code']=df_stocklist_merge['ts_code'].map(lambda x : x[:6])
df_stocklist_merge['ts_code']=df_stocklist_merge['ts_code'].fillna(0).apply(pd.to_numeric)
#print(df_stocklist_merge)
df_last=pd.read_csv(last_path,index_col=0,header=0)
df_hold=df_last[['ts_code','hold']]
#print(df_last)
df=pd.read_csv(Today_result_path,index_col=0,header=0)
        # Drop STAR Market (688xxx), Beijing Stock Exchange, and ST stocks
df=df[df['ts_code']<688000]
df= | pd.merge(df, df_stocklist_merge, how='left', on=['ts_code']) | pandas.merge |
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from pytest import importorskip
from evalml.model_family import ModelFamily
from evalml.pipelines.components import ARIMARegressor
from evalml.problem_types import ProblemTypes
sktime_arima = importorskip(
"sktime.forecasting.arima", reason="Skipping test because sktime not installed"
)
forecasting = importorskip(
"sktime.forecasting.base", reason="Skipping test because sktime not installed"
)
def test_model_family():
assert ARIMARegressor.model_family == ModelFamily.ARIMA
def test_problem_types():
assert set(ARIMARegressor.supported_problem_types) == {
ProblemTypes.TIME_SERIES_REGRESSION
}
def test_model_instance(ts_data):
X, y = ts_data
clf = ARIMARegressor()
fitted = clf.fit(X, y)
assert isinstance(fitted, ARIMARegressor)
def test_get_dates_fit_and_predict(ts_data):
X, y = ts_data
clf = ARIMARegressor()
date_col, X_ = clf._get_dates(X, y)
assert isinstance(date_col, pd.DatetimeIndex)
assert X_.equals(X)
def test_match_indices(ts_data):
X, y = ts_data
date_index = pd.date_range("2020-10-02", "2020-11-01")
clf = ARIMARegressor()
X_, y_ = clf._match_indices(X, y, date_index)
assert isinstance(X_.index, pd.DatetimeIndex)
assert isinstance(y_.index, pd.DatetimeIndex)
assert X_.index.equals(y_.index)
assert X_.index.equals(date_index)
@pytest.mark.parametrize("predict", [True, False])
@pytest.mark.parametrize("dates_shape", [0, 1, 2])
def test_format_dates(predict, dates_shape, ts_data):
X, y = ts_data
date_index = pd.date_range("2020-10-02", "2020-11-01")
if dates_shape == 1:
date_index = | pd.DataFrame(date_index) | pandas.DataFrame |
import hashlib
import json
import os
import pickle
import re
import shutil
from datetime import date, datetime
import pandas as pd
from catalyst.assets._assets import TradingPair
from six import string_types
from six.moves.urllib import request
from catalyst.constants import DATE_FORMAT, SYMBOLS_URL
from catalyst.exchange.exchange_errors import ExchangeSymbolsNotFound, \
InvalidHistoryFrequencyError, InvalidHistoryFrequencyAlias
from catalyst.utils.paths import data_root, ensure_directory, \
last_modified_time
def get_sid(symbol):
"""
Create a sid by hashing the symbol of a currency pair.
Parameters
----------
symbol: str
Returns
-------
int
The resulting sid.
"""
sid = int(
hashlib.sha256(symbol.encode('utf-8')).hexdigest(), 16
) % 10 ** 6
return sid
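# Usage sketch (the symbol below is illustrative): the sid is deterministic,
# so the same trading pair always maps to the same integer in [0, 10**6).
#
#   sid = get_sid('btc_usdt')
#   assert sid == get_sid('btc_usdt')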
def get_exchange_folder(exchange_name, environ=None):
"""
The root path of an exchange folder.
Parameters
----------
exchange_name: str
environ:
Returns
-------
str
"""
if not environ:
environ = os.environ
root = data_root(environ)
exchange_folder = os.path.join(root, 'exchanges', exchange_name)
ensure_directory(exchange_folder)
return exchange_folder
def get_exchange_symbols_filename(exchange_name, is_local=False, environ=None):
"""
    The absolute path of the exchange's symbols.json file.
Parameters
----------
exchange_name:
environ:
Returns
-------
str
"""
name = 'symbols.json' if not is_local else 'symbols_local.json'
exchange_folder = get_exchange_folder(exchange_name, environ)
return os.path.join(exchange_folder, name)
def download_exchange_symbols(exchange_name, environ=None):
"""
Downloads the exchange's symbols.json from the repository.
Parameters
----------
exchange_name: str
environ:
Returns
-------
str
"""
filename = get_exchange_symbols_filename(exchange_name)
url = SYMBOLS_URL.format(exchange=exchange_name)
response = request.urlretrieve(url=url, filename=filename)
return response
def symbols_parser(asset_def):
for key, value in asset_def.items():
match = isinstance(value, string_types) \
and re.search(r'(\d{4}-\d{2}-\d{2})', value)
if match:
try:
asset_def[key] = pd.to_datetime(value, utc=True)
except ValueError:
pass
return asset_def
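# Sketch of the conversion performed by symbols_parser (the asset dict below is
# hypothetical): any string value containing a YYYY-MM-DD date is replaced by a
# tz-aware UTC pd.Timestamp, other values are left untouched.
#
#   symbols_parser({'symbol': 'eth_btc', 'start_date': '2017-01-01'})
#   # -> 'start_date' becomes a UTC Timestamp for 2017-01-01; 'symbol' is unchanged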
def get_exchange_symbols(exchange_name, is_local=False, environ=None):
"""
The de-serialized content of the exchange's symbols.json.
Parameters
----------
exchange_name: str
is_local: bool
environ:
Returns
-------
Object
"""
filename = get_exchange_symbols_filename(exchange_name, is_local)
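    # Refresh policy: for remote symbol files, re-download when the cached copy
    # is missing or more than one day old; local symbol files are never
    # re-downloaded here.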
if not is_local and (not os.path.isfile(filename) or pd.Timedelta(
pd.Timestamp('now', tz='UTC') - last_modified_time(
filename)).days > 1):
download_exchange_symbols(exchange_name, environ)
if os.path.isfile(filename):
with open(filename) as data_file:
try:
data = json.load(data_file, object_hook=symbols_parser)
return data
except ValueError:
return dict()
else:
raise ExchangeSymbolsNotFound(
exchange=exchange_name,
filename=filename
)
def save_exchange_symbols(exchange_name, assets, is_local=False, environ=None):
"""
Save assets into an exchange_symbols file.
Parameters
----------
exchange_name: str
assets: list[dict[str, object]]
is_local: bool
environ
Returns
-------
"""
asset_dicts = dict()
for symbol in assets:
asset_dicts[symbol] = assets[symbol].to_dict()
filename = get_exchange_symbols_filename(
exchange_name, is_local, environ
)
with open(filename, 'wt') as handle:
json.dump(asset_dicts, handle, indent=4, default=symbols_serial)
def get_symbols_string(assets):
"""
A concatenated string of symbols from a list of assets.
Parameters
----------
assets: list[TradingPair]
Returns
-------
str
"""
array = [assets] if isinstance(assets, TradingPair) else assets
return ', '.join([asset.symbol for asset in array])
def get_exchange_auth(exchange_name, environ=None):
"""
    The de-serialized content of the exchange's auth.json file.
Parameters
----------
exchange_name: str
environ:
Returns
-------
Object
"""
exchange_folder = get_exchange_folder(exchange_name, environ)
filename = os.path.join(exchange_folder, 'auth.json')
if os.path.isfile(filename):
with open(filename) as data_file:
data = json.load(data_file)
return data
else:
data = dict(name=exchange_name, key='', secret='')
with open(filename, 'w') as f:
json.dump(data, f, sort_keys=False, indent=2,
separators=(',', ':'))
return data
def delete_algo_folder(algo_name, environ=None):
"""
Delete the folder containing the algo state.
Parameters
----------
algo_name: str
environ:
Returns
-------
str
"""
folder = get_algo_folder(algo_name, environ)
shutil.rmtree(folder)
def get_algo_folder(algo_name, environ=None):
"""
    The root folder holding the algorithm's state.
Parameters
----------
algo_name: str
environ:
Returns
-------
str
"""
if not environ:
environ = os.environ
root = data_root(environ)
algo_folder = os.path.join(root, 'live_algos', algo_name)
ensure_directory(algo_folder)
return algo_folder
def get_algo_object(algo_name, key, environ=None, rel_path=None):
"""
The de-serialized object of the algo name and key.
Parameters
----------
algo_name: str
key: str
environ:
rel_path: str
Returns
-------
Object
"""
if algo_name is None:
return None
folder = get_algo_folder(algo_name, environ)
if rel_path is not None:
folder = os.path.join(folder, rel_path)
filename = os.path.join(folder, key + '.p')
if os.path.isfile(filename):
try:
with open(filename, 'rb') as handle:
return pickle.load(handle)
except Exception:
return None
else:
return None
def save_algo_object(algo_name, key, obj, environ=None, rel_path=None):
"""
Serialize and save an object by algo name and key.
Parameters
----------
algo_name: str
key: str
obj: Object
environ:
rel_path: str
"""
folder = get_algo_folder(algo_name, environ)
if rel_path is not None:
folder = os.path.join(folder, rel_path)
ensure_directory(folder)
filename = os.path.join(folder, key + '.p')
with open(filename, 'wb') as handle:
pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
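# Hedged round-trip sketch (algo name, key and payload are illustrative):
#
#   save_algo_object('my_algo', 'state', {'last_run': '2018-01-01'})
#   assert get_algo_object('my_algo', 'state') == {'last_run': '2018-01-01'}
#
# Both helpers resolve the same <data_root>/live_algos/<algo_name>/<key>.p file.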
def get_algo_df(algo_name, key, environ=None, rel_path=None):
"""
The de-serialized DataFrame of an algo name and key.
Parameters
----------
algo_name: str
key: str
environ:
rel_path: str
Returns
-------
DataFrame
"""
folder = get_algo_folder(algo_name, environ)
if rel_path is not None:
folder = os.path.join(folder, rel_path)
filename = os.path.join(folder, key + '.csv')
if os.path.isfile(filename):
try:
with open(filename, 'rb') as handle:
return | pd.read_csv(handle, index_col=0, parse_dates=True) | pandas.read_csv |
import pandas as pd
def main(type):
df = pd.read_csv('./data/servant_data_'+type+'.csv')
df_event = pd.read_csv('./data/event_list_bk.csv')
event_list = tuple(df_event['事件名称'].values)
print(event_list)
df1 = df[df['强化'] == '强化前']
df2 = df[df['强化'] == '强化后']
df3 = df[df['强化'] == '未强化']
df_jp = pd.concat([df2, df3])
df_furture = pd.DataFrame()
for event in event_list:
df_temp1 = df[df['强化时间'] == event]
df_furture = pd.concat([df_furture, df_temp1])
print(df_furture)
df_enhanced = | pd.concat([df1, df2]) | pandas.concat |
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismaching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
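# For reference, the factories above exec() small generated sources; e.g.
# _make_func_use_binop1('+') builds
#     def test_impl(A, B):
#         return A + B
# and _make_func_use_method_arg1('add') builds
#     def test_impl(A, B):
#         return A.add(B)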
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
           handles string series without changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
           handles string series without changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
           handles string series without changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
           handles string series without changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
           handles string series without changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
           handles string series without changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
                    # cannot compare NaN to NaN directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() any parameters unsupported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() any parameters unsupported")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() any parameters unsupported")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verify distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
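    # get_start_end(n) is a test helper returning this process's [start, end) chunk of an
    # n-element container (based on its rank and the number of MPI processes), so passing
    # S[start:end] hands each process only its local slice, mimicking distributed input.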
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_concat1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6., 7.])
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_map1(self):
def test_impl(S):
return S.map(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_global1(self):
def test_impl(S):
return S.map(lambda a: a + GLOBAL_VAL)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
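    # GLOBAL_VAL is a module-level constant defined earlier in this test file (not shown here);
    # the test above checks that a jitted lambda can capture a module global, not only locals.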
def test_series_map_tup1(self):
def test_impl(S):
return S.map(lambda a: (a, 2 * a))
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup_map1(self):
def test_impl(S):
A = S.map(lambda a: (a, 2 * a))
return A.map(lambda a: a[1])
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_combine(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_float3264(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([np.float64(1), np.float64(2),
np.float64(3), np.float64(4), np.float64(5)])
S2 = pd.Series([np.float32(1), np.float32(2),
np.float32(3), np.float32(4), np.float32(5)])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_assert1(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3])
S2 = pd.Series([6., 21., 3., 5.])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_assert2(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6., 21., 3., 5.])
S2 = pd.Series([1, 2, 3])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_integer(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 16)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 3, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_different_types(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6.1, 21.2, 3.3, 5.4, 6.7])
S2 = pd.Series([1, 2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_integer_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 17, -5, 4])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_apply1(self):
def test_impl(S):
return S.apply(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_abs1(self):
def test_impl(S):
return S.abs()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, -2., 3., 0.5E-01, 0xFF, 0o7, 0b101])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_cov1(self):
def test_impl(S1, S2):
return S1.cov(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_corr1(self):
def test_impl(S1, S2):
return S1.corr(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_str_len1(self):
def test_impl(S):
return S.str.len()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'abc', 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str2str(self):
str2str_methods = ('capitalize', 'lower', 'lstrip', 'rstrip',
'strip', 'swapcase', 'title', 'upper')
for method in str2str_methods:
func_text = "def test_impl(S):\n"
func_text += " return S.str.{}()\n".format(method)
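            # _make_func_from_text is a helper defined earlier in this file that exec's the
            # generated source and returns the resulting test_impl callable, so each string
            # method gets its own independently generated implementation.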
test_impl = _make_func_from_text(func_text)
hpat_func = hpat.jit(test_impl)
S = pd.Series([' \tbbCD\t ', 'ABC', ' mCDm\t', 'abc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_append1(self):
def test_impl(S, other):
return S.append(other).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
# Test single series
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_append2(self):
def test_impl(S1, S2, S3):
return S1.append([S2, S3]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
S3 = pd.Series([1.0])
        # Test appending a list of series
np.testing.assert_array_equal(hpat_func(S1, S2, S3),
test_impl(S1, S2, S3))
def test_series_isin_list1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = [1, 2, 5, 7, 8]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = [1., 2., 5., 7., 8.]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'q', 'w', 'c', 'd', 'e', 'r'])
values = ['a', 'q', 'c', 'd', 'e']
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = {1, 2, 5, 7, 8}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = {1., 2., 5., 7., 8.}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
@unittest.skip('TODO: requires hashable unicode strings in Numba')
def test_series_isin_set3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'c', 'd', 'e'] * 2)
values = {'b', 'c', 'e'}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3., np.inf])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull1(self):
def test_impl(S):
return S.isnull()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull_full(self):
def test_impl(series):
return series.isnull()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_numeric + [test_global_input_data_unicode_kind4]:
series = pd.Series(data * 3)
ref_result = test_impl(series)
jit_result = hpat_func(series)
pd.testing.assert_series_equal(ref_result, jit_result)
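    # test_global_input_data_float64 / _numeric / _unicode_kind4 are shared module-level lists
    # of representative inputs (including NaN/inf values and wide unicode strings) that the
    # parameterized "full"/"noidx" tests in this file iterate over.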
def test_series_notna1(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_notna_noidx_float(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_int(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_num(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_notna_noidx_str(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_str_notna(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different')
def test_series_dt_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_nlargest1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_default1(self):
def test_impl(S):
return S.nlargest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_nan1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
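    # The *_parallel1 tests in this file share one pattern: ParquetGenerator.gen_kde_pq()
    # writes a small `kde.parquet` file up front so that pq.read_table() inside the jitted
    # function gives hpat a data source it can read and process in distributed fashion.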
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_str(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_int(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=[2, 3, 4, 5, 6])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_default1(self):
def test_impl(S):
return S.nsmallest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_nan1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_str(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_int(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=[1, 2, 3, 4, 5])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_head1(self):
def test_impl(S):
return S.head(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_default1(self):
'''Verifies default head method for non-distributed pass of Series with no index'''
def test_impl(S):
return S.head()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_index1(self):
'''Verifies head method for Series with integer index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index2(self):
'''Verifies head method for Series with string index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index3(self):
'''Verifies head method for non-distributed pass of Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip("Passed if run single")
def test_series_head_index4(self):
'''Verifies head method for non-distributed pass of Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 4, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_parallel1(self):
'''Verifies head method for distributed Series with string data and no index'''
def test_impl(S):
return S.head(7)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
        # need to test different lengths, as head's size is fixed and the implementation
        # depends on how the per-processor data size relates to the output size
for n in range(1, 5):
S = pd.Series(['a', 'ab', 'abc', 'c', 'f', 'hh', ''] * n)
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_index_parallel1(self):
'''Verifies head method for distributed Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip("Passed if run single")
def test_series_head_index_parallel2(self):
'''Verifies head method for distributed Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_noidx_float(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_int(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_num(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Old implementation not work with n negative and data str")
def test_series_head_noidx_str(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Broke another three tests")
def test_series_head_idx(self):
def test_impl(S):
return S.head()
def test_impl_param(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
['as', 'b', 'abb', 'sss', 'ytr65', '', 'qw', 'a', 'b'],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
pd.testing.assert_series_equal(result, result_ref)
hpat_func_param1 = hpat.jit(test_impl_param)
for param1 in [1, 3, 7]:
result_param1_ref = test_impl_param(S, param1)
result_param1 = hpat_func_param1(S, param1)
pd.testing.assert_series_equal(result_param1, result_param1_ref)
def test_series_median1(self):
'''Verifies median implementation for float and integer series of random data'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
# odd size
m = 101
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"BUG: old-style median implementation doesn't filter NaNs")
def test_series_median_skipna_default1(self):
'''Verifies median implementation with default skipna=True argument on a series with NA values'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"Skipna argument is not supported in old-style")
def test_series_median_skipna_false1(self):
'''Verifies median implementation with skipna=False on a series with NA values'''
def test_impl(S):
return S.median(skipna=False)
hpat_func = hpat.jit(test_impl)
# np.inf is not NaN, so verify that a correct number is returned
S1 = pd.Series([2., 3., 5., np.inf, 5., 6., 7.])
self.assertEqual(hpat_func(S1), test_impl(S1))
# TODO: both return values are 'nan', but HPAT's is not np.nan, hence checking with
        # assertIs() doesn't work - check if it's Numba related
S2 = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(np.isnan(hpat_func(S2)), np.isnan(test_impl(S2)))
def test_series_median_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.median()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
def test_series_argsort_parallel(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.argsort().values
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_idxmin1(self):
def test_impl(A):
return A.idxmin()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_idxmin_str(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmin_str_idx(self):
def test_impl(S):
return S.idxmin(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_no(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_int(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3], [4, 45, 14])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_noidx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmin_idx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, -np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_idxmax1(self):
def test_impl(A):
return A.idxmax()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmax_str_idx(self):
def test_impl(S):
return S.idxmax(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmax_noidx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmax_idx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.nan, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_sort_values1(self):
def test_impl(A):
return A.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_sort_values_index1(self):
def test_impl(A, B):
S = pd.Series(A, B)
return S.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
# TODO: support passing Series with Index
# S = pd.Series(np.random.ranf(n), np.random.randint(0, 100, n))
A = np.random.ranf(n)
B = np.random.ranf(n)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_sort_values_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.sort_values()
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_shift(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.shift()
cfunc = hpat.jit(pyfunc)
pd.testing.assert_series_equal(cfunc(), pyfunc())
def test_series_shift_unboxing(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_float64:
series = pd.Series(data)
pd.testing.assert_series_equal(cfunc(series), pyfunc(series))
def test_series_shift_full(self):
def pyfunc(series, periods, freq, axis, fill_value):
return series.shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
freq = None
axis = 0
for data in test_global_input_data_float64:
series = pd.Series(data)
for periods in [-2, 0, 3]:
for fill_value in [9.1, np.nan, -3.3, None]:
jit_result = cfunc(series, periods, freq, axis, fill_value)
ref_result = pyfunc(series, periods, freq, axis, fill_value)
pd.testing.assert_series_equal(jit_result, ref_result)
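    # The shift() error tests below check compile-time behaviour: unsupported dtypes and
    # parameters are rejected during typing, so the tests assert on the TypingError message
    # text rather than on any runtime result.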
def test_series_shift_str(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method shift(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_shift_fill_str(self):
def pyfunc(series, fill_value):
return series.shift(fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(TypingError) as raises:
cfunc(series, fill_value='unicode')
msg = 'Method shift(). The object must be a number. Given fill_value: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_shift_unsupported_params(self):
def pyfunc(series, freq, axis):
return series.shift(freq=freq, axis=axis)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(TypingError) as raises:
cfunc(series, freq='12H', axis=0)
msg = 'Method shift(). Unsupported parameters. Given freq: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, freq=None, axis=1)
msg = 'Method shift(). Unsupported parameters. Given axis != 0'
self.assertIn(msg, str(raises.exception))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_shift_index_str(self):
def test_impl(S):
return S.shift()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3., 5., np.nan, 6., 7.], index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_shift_index_int(self):
def test_impl(S):
return S.shift()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3., 5., np.nan, 6., 7.], index=[1, 2, 3, 4, 5, 6, 7])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_index1(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=[0, 1, 2])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_take_index_default(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_default_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_int(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=[3, 0, 4, 2, 1])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_int_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=[3, 0, 4, 2, 1])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_str(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=['test', 'series', 'take', 'str', 'index'])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_str_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=['test', 'series', 'take', 'str', 'index'])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_iterator_int(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([3, 2, 1, 5, 4])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_float(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([0.3, 0.2222, 0.1756, 0.005, 0.4])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_boolean(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([True, False])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_string(self):
def test_impl(A):
return [i for i in A]
A = pd.Series(['a', 'ab', 'abc', '', 'dddd'])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_one_value(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([5])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
@unittest.skip("Fails when NUMA_PES>=2 due to unimplemented sync of such construction after distribution")
def test_series_iterator_no_param(self):
def test_impl():
A = pd.Series([3, 2, 1, 5, 4])
return [i for i in A]
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_iterator_empty(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([np.int64(x) for x in range(0)])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_default_index(self):
def test_impl():
A = pd.Series([3, 2, 1, 5, 4])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
@unittest.skip("Implement drop_duplicates for Series")
def test_series_drop_duplicates(self):
def test_impl():
A = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
return A.drop_duplicates()
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_quantile(self):
def test_impl():
A = pd.Series([1, 2.5, .5, 3, 5])
return A.quantile()
hpat_func = hpat.jit(test_impl)
np.testing.assert_equal(hpat_func(), test_impl())
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.quantile() parameter as a list unsupported")
def test_series_quantile_q_vector(self):
def test_series_quantile_q_vector_impl(S, param1):
return S.quantile(param1)
S = pd.Series(np.random.ranf(100))
hpat_func = hpat.jit(test_series_quantile_q_vector_impl)
param1 = [0.0, 0.25, 0.5, 0.75, 1.0]
result_ref = test_series_quantile_q_vector_impl(S, param1)
result = hpat_func(S, param1)
np.testing.assert_equal(result, result_ref)
@unittest.skip("Implement unique without sorting like in pandas")
def test_unique(self):
def test_impl(S):
return S.unique()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2, 1, 3, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_unique_sorted(self):
def test_impl(S):
return S.unique()
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
S[2] = 0
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_unique_str(self):
def test_impl():
data = pd.Series(['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'])
return data.unique()
hpat_func = hpat.jit(test_impl)
        # since the order of the elements is different - check only the count of elements
ref_result = test_impl().size
result = hpat_func().size
np.testing.assert_array_equal(ref_result, result)
def test_series_groupby_count(self):
def test_impl():
A = pd.Series([13, 11, 21, 13, 13, 51, 42, 21])
grouped = A.groupby(A, sort=False)
return grouped.count()
hpat_func = hpat.jit(test_impl)
ref_result = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, ref_result)
@unittest.skip("getiter for this type is not implemented yet")
def test_series_groupby_iterator_int(self):
def test_impl():
A = pd.Series([13, 11, 21, 13, 13, 51, 42, 21])
grouped = A.groupby(A)
return [i for i in grouped]
hpat_func = hpat.jit(test_impl)
ref_result = test_impl()
result = hpat_func()
np.testing.assert_array_equal(result, ref_result)
def test_series_std(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.std()
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
np.testing.assert_equal(ref_result, result)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() parameters "skipna" and "ddof" unsupported')
def test_series_std_unboxing(self):
def pyfunc(series, skipna, ddof):
return series.std(skipna=skipna, ddof=ddof)
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
series = pd.Series(data)
for ddof in [0, 1]:
for skipna in [True, False]:
ref_result = pyfunc(series, skipna=skipna, ddof=ddof)
result = cfunc(series, skipna=skipna, ddof=ddof)
np.testing.assert_equal(ref_result, result)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() strings as input data unsupported')
def test_series_std_str(self):
def pyfunc(series):
return series.std()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method std(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() parameters "axis", "level", "numeric_only" unsupported')
def test_series_std_unsupported_params(self):
def pyfunc(series, axis, level, numeric_only):
return series.std(axis=axis, level=level, numeric_only=numeric_only)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
msg = 'Method std(). Unsupported parameters. Given {}: {}'
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=1, level=None, numeric_only=None)
self.assertIn(msg.format('axis', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=1, numeric_only=None)
self.assertIn(msg.format('level', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=None, numeric_only=True)
self.assertIn(msg.format('numeric_only', 'bool'), str(raises.exception))
def test_series_nunique(self):
def test_series_nunique_impl(S):
return S.nunique()
def test_series_nunique_param1_impl(S, dropna):
return S.nunique(dropna)
hpat_func = hpat.jit(test_series_nunique_impl)
the_same_string = "the same string"
test_input_data = []
data_simple = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'],
['aa', 'copy aa', the_same_string, 'b', 'b', 'cccc', the_same_string, 'dd', 'ddd', 'dd', 'copy aa', 'copy aa'],
[]
]
data_extra = [[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
# unsupported ['aa', np.nan, 'b', 'b', 'cccc', np.nan, 'ddd', 'dd'],
# unsupported [np.nan, 'copy aa', the_same_string, 'b', 'b', 'cccc', the_same_string, 'dd', 'ddd', 'dd', 'copy aa', 'copy aa'],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
if hpat.config.config_pipeline_hpat_default:
"""
HPAT pipeline Series.nunique() does not support numpy.nan
"""
test_input_data = data_simple
else:
test_input_data = data_simple + data_extra
for input_data in test_input_data:
S = pd.Series(input_data)
result_ref = test_series_nunique_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
if not hpat.config.config_pipeline_hpat_default:
"""
            HPAT pipeline does not support the dropna parameter of Series.nunique()
"""
hpat_func_param1 = hpat.jit(test_series_nunique_param1_impl)
for param1 in [True, False]:
result_param1_ref = test_series_nunique_param1_impl(S, param1)
result_param1 = hpat_func_param1(S, param1)
self.assertEqual(result_param1, result_param1_ref)
def test_series_var(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.var()
cfunc = hpat.jit(pyfunc)
np.testing.assert_equal(pyfunc(), cfunc())
def test_series_var_unboxing(self):
def pyfunc(series):
return series.var()
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
series = pd.Series(data)
np.testing.assert_equal(pyfunc(series), cfunc(series))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.var() parameters "ddof" and "skipna" unsupported')
def test_series_var_full(self):
def pyfunc(series, skipna, ddof):
return series.var(skipna=skipna, ddof=ddof)
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
series = pd.Series(data)
for ddof in [0, 1]:
for skipna in [True, False]:
ref_result = pyfunc(series, skipna=skipna, ddof=ddof)
result = cfunc(series, skipna=skipna, ddof=ddof)
np.testing.assert_equal(ref_result, result)
def test_series_var_str(self):
def pyfunc(series):
return series.var()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method var(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_var_unsupported_params(self):
def pyfunc(series, axis, level, numeric_only):
return series.var(axis=axis, level=level, numeric_only=numeric_only)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
msg = 'Method var(). Unsupported parameters. Given {}: {}'
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=1, level=None, numeric_only=None)
self.assertIn(msg.format('axis', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=1, numeric_only=None)
self.assertIn(msg.format('level', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=None, numeric_only=True)
self.assertIn(msg.format('numeric_only', 'bool'), str(raises.exception))
def test_series_count(self):
def test_series_count_impl(S):
return S.count()
hpat_func = hpat.jit(test_series_count_impl)
the_same_string = "the same string"
test_input_data = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'],
['aa', 'copy aa', the_same_string, 'b', 'b', 'cccc', the_same_string, 'dd', 'ddd', 'dd',
'copy aa', 'copy aa'],
[],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf]
]
for input_data in test_input_data:
S = pd.Series(input_data)
result_ref = test_series_count_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.cumsum() np.nan as input data unsupported')
def test_series_cumsum(self):
def test_impl():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.cumsum()
pyfunc = test_impl
cfunc = hpat.jit(pyfunc)
pd.testing.assert_series_equal(pyfunc(), cfunc())
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.cumsum() np.nan as input data unsupported')
def test_series_cumsum_unboxing(self):
def test_impl(s):
return s.cumsum()
pyfunc = test_impl
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
            series = pd.Series(data)
            pd.testing.assert_series_equal(pyfunc(series), cfunc(series))
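# ---------------------------------------------------------------------------
# The remainder of this document is a separate test module: pandas' own
# type-inference and scalar-detection tests (pandas.lib.infer_dtype,
# maybe_convert_*, isscalar), unrelated to the hpat Series tests above.
# ---------------------------------------------------------------------------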
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas import Index
from pandas.compat import long, u, PY2
class TestInference(tm.TestCase):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
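    # The maybe_convert_numeric tests below exercise pandas' low-level parser that coerces an
    # object array of strings/numbers to a float64 array, treating entries listed in na_values
    # as NaN; the gh references inside each test name the regression being guarded against.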
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assertRaisesRegexp(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
for coerce_type in (True, False):
out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
self.assertTrue(np.all(np.isnan(result)))
def test_convert_non_hashable(self):
# GH13324
        # make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
class TestTypeInference(tm.TestCase):
_multiprocess_can_split_ = True
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
self.assertEqual(result, 'integer')
result = lib.infer_dtype([])
self.assertEqual(result, 'empty')
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'datetime64')
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'date')
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
class TestConvert(tm.TestCase):
def test_convert_objects(self):
arr = np.array(['a', 'b', np.nan, np.nan, 'd', 'e', 'f'], dtype='O')
result = lib.maybe_convert_objects(arr)
self.assertTrue(result.dtype == np.object_)
def test_convert_objects_ints(self):
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
for dtype_str in dtypes:
arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype_str))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.integer))
def test_convert_objects_complex_number(self):
for dtype in np.sctypes['complex']:
arr = np.array(list(1j * np.arange(20, dtype=dtype)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.complexfloating))
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
self.assertTrue(lib.isscalar(None))
self.assertTrue(lib.isscalar(True))
self.assertTrue(lib.isscalar(False))
self.assertTrue(lib.isscalar(0.))
self.assertTrue(lib.isscalar(np.nan))
self.assertTrue(lib.isscalar('foobar'))
self.assertTrue(lib.isscalar(b'foobar'))
self.assertTrue(lib.isscalar(u('efoobar')))
self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
self.assertTrue(lib.isscalar(date(2014, 1, 1)))
self.assertTrue(lib.isscalar(time(12, 0)))
self.assertTrue(lib.isscalar(timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
self.assertFalse(lib.isscalar({}))
self.assertFalse(lib.isscalar([]))
self.assertFalse(lib.isscalar([1]))
self.assertFalse(lib.isscalar(()))
self.assertFalse(lib.isscalar((1, )))
self.assertFalse(lib.isscalar(slice(None)))
self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
self.assertTrue(lib.isscalar(np.int64(1)))
self.assertTrue(lib.isscalar(np.float64(1.)))
self.assertTrue(lib.isscalar(np.int32(1)))
self.assertTrue(lib.isscalar(np.object_('foobar')))
self.assertTrue(lib.isscalar(np.str_('foobar')))
self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
self.assertFalse(lib.isscalar(zerodim))
self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
def test_isscalar_numpy_arrays(self):
self.assertFalse(lib.isscalar(np.array([])))
self.assertFalse(lib.isscalar(np.array([[]])))
self.assertFalse(lib.isscalar(np.matrix('1; 2')))
def test_isscalar_pandas_scalars(self):
self.assertTrue(lib.isscalar(pd.Timestamp('2014-01-01')))
self.assertTrue(lib.isscalar(pd.Timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.Period('2014-01-01')))
    def test_isscalar_pandas_containers(self):
self.assertFalse(lib.isscalar(pd.Series()))
self.assertFalse(lib.isscalar(pd.Series([1])))
self.assertFalse(lib.isscalar(pd.DataFrame()))
self.assertFalse(lib.isscalar(pd.DataFrame([[1]])))
self.assertFalse(lib.isscalar(pd.Panel()))
self.assertFalse(lib.isscalar(pd.Panel([[[1]]])))
self.assertFalse(lib.isscalar(pd.Index([])))
self.assertFalse(lib.isscalar(pd.Index([1])))
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
self.assert_numpy_array_equal(result, expected)
self.assert_numpy_array_equal(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, 4], dtype='i8')
self.assert_numpy_array_equal(result, expected)
        arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
        result = lib.convert_sql_column(arr)
        expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
        self.assert_numpy_array_equal(result, expected)
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
            df = DataFrame({"a": s1, "b": s2})
# Copyright 2017 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import math
import warnings
import empyrical as ep
import scipy.stats
from .perf import DailyPerformance, AggregateDailyPerformance
from .base import BaseTearsheet
from .exceptions import MoonchartError
from .utils import (
with_baseline,
get_sharpe,
get_cagr,
get_cum_returns,
get_drawdowns
)
class Tearsheet(BaseTearsheet):
"""
Generates a tear sheet of performance stats and graphs.
"""
@classmethod
def from_moonshot_csv(cls, filepath_or_buffer, figsize=None,
max_cols_for_details=25, trim_outliers=None,
how_to_aggregate=None,
pdf_filename=None, riskfree=0,
start_date=None, end_date=None,
compound=True, rolling_sharpe_window=200):
"""
Create a full tear sheet from a moonshot backtest results CSV.
Parameters
----------
filepath_or_buffer : str or file-like object
filepath of CSV or file-like object
figsize : tuple (width, height), optional
(width, height) of matplotlib figure. Default is (16, 12)
max_cols_for_details : int, optional
suppress detailed plots if there are more than this many columns
(i.e. strategies or securities). Too many plots may cause slow
rendering. Default 25.
trim_outliers: int or float, optional
discard returns that are more than this many standard deviations
from the mean. Useful for dealing with data anomalies that cause
large spikes in plots.
how_to_aggregate : dict, optional
a dict of {fieldname: aggregation method} specifying how to aggregate
fields from intraday to daily. See the docstring for
`moonchart.utils.intraday_to_daily` for more details.
pdf_filename : string, optional
save tear sheet to this filepath as a PDF instead of displaying
riskfree : float, optional
the riskfree rate (default 0)
start_date : str (YYYY-MM-DD), optional
truncate at this start date (otherwise include entire date range)
end_date : str (YYYY-MM-DD), optional
truncate at this end date (otherwise include entire date range)
compound : bool
True for compound/geometric returns, False for arithmetic returns.
Default True
rolling_sharpe_window : int, optional
compute rolling Sharpe over this many periods (default 200)
Returns
-------
None
Examples
--------
        >>> from moonchart import Tearsheet
>>> Tearsheet.from_moonshot_csv("backtest_results.csv")
"""
perf = DailyPerformance.from_moonshot_csv(
filepath_or_buffer,
start_date=start_date,
end_date=end_date,
trim_outliers=trim_outliers,
how_to_aggregate=how_to_aggregate,
riskfree=riskfree,
compound=compound,
rolling_sharpe_window=rolling_sharpe_window)
t = cls(figsize=figsize,
max_cols_for_details=max_cols_for_details,
pdf_filename=pdf_filename)
return t.create_full_tearsheet(perf)
@classmethod
def from_pnl_csv(cls, filepath_or_buffer, figsize=None,
max_cols_for_details=25, trim_outliers=None,
how_to_aggregate=None,
pdf_filename=None, riskfree=0,
start_date=None, end_date=None,
compound=True, rolling_sharpe_window=200):
"""
Create a full tear sheet from a pnl CSV.
Parameters
----------
filepath_or_buffer : str or file-like object
filepath or file-like object of the CSV
figsize : tuple (width, height), optional
(width, height) of matplotlib figure. Default is (16, 12)
max_cols_for_details : int, optional
suppress detailed plots if there are more than this many columns
(i.e. strategies or securities). Too many plots may cause slow
rendering. Default 25.
trim_outliers: int or float, optional
discard returns that are more than this many standard deviations
from the mean
how_to_aggregate : dict, optional
a dict of {fieldname: aggregation method} specifying how to aggregate
fields from intraday to daily. See the docstring for
`moonchart.utils.intraday_to_daily` for more details.
pdf_filename : string, optional
save tear sheet to this filepath as a PDF instead of displaying
riskfree : float, optional
the riskfree rate (default 0)
start_date : str (YYYY-MM-DD), optional
truncate at this start date (otherwise include entire date range)
end_date : str (YYYY-MM-DD), optional
truncate at this end date (otherwise include entire date range)
compound : bool
True for compound/geometric returns, False for arithmetic returns.
Default True
rolling_sharpe_window : int, optional
compute rolling Sharpe over this many periods (default 200)
Returns
-------
None
"""
perf = DailyPerformance.from_pnl_csv(
filepath_or_buffer,
start_date=start_date,
end_date=end_date,
trim_outliers=trim_outliers,
how_to_aggregate=how_to_aggregate,
riskfree=riskfree,
compound=compound,
rolling_sharpe_window=rolling_sharpe_window)
t = cls(figsize=figsize,
max_cols_for_details=max_cols_for_details,
pdf_filename=pdf_filename)
return t.create_full_tearsheet(perf)
def create_full_tearsheet(self, performance):
"""
Create a full tear sheet of performance results including returns
plots, returns by year plots, and position-related plots.
Parameters
----------
performance : instance
a DailyPerformance instance
Returns
-------
None
Examples
--------
>>> from moonchart import DailyPerformance, Tearsheet
>>> perf = DailyPerformance.from_moonshot_csv("backtest_results.csv")
>>> t = Tearsheet()
>>> t.create_full_tearsheet(perf)
See Also
--------
Tearsheet.from_moonshot_csv : create a full tear sheet from a Moonshot CSV
"""
agg_performance = AggregateDailyPerformance(performance)
num_cols = len(performance.returns.columns)
if num_cols > self.max_cols_for_details:
warnings.warn("Suppressing details because there are more than {0} columns "
"(you can control this setting by modifying "
"Tearsheet.max_cols_for_details)".format(
self.max_cols_for_details))
self.create_summary_tearsheet(performance, agg_performance)
self.create_returns_tearsheet(performance, agg_performance)
self.create_returns_by_year_tearsheet(performance, agg_performance)
if any([exposures is not None for exposures in (
performance.net_exposures, performance.abs_exposures)]):
self.create_positions_tearsheet(performance, agg_performance)
self._create_constituents_tearsheet(performance)
self._save_or_show()
def create_summary_tearsheet(self, performance, agg_performance=None):
"""
Create a tear sheet of summary performance stats in a table.
Parameters
----------
performance : DailyPerformance, required
a DailyPerformance instance
agg_performance : AggregateDailyPerformance, optional
an AggregateDailyPerformance instance. Constructed from performance
if not provided.
Returns
-------
None
Examples
--------
>>> from moonchart import DailyPerformance, Tearsheet
>>> perf = DailyPerformance.from_moonshot_csv("backtest_results.csv")
>>> t = Tearsheet()
>>> t.create_summary_tearsheet(perf)
"""
if agg_performance is None:
agg_performance = AggregateDailyPerformance(performance)
stats = []
if agg_performance.pnl is not None:
stats.append(["PNL", round(agg_performance.pnl.sum(), 2)])
if agg_performance.commission_amounts is not None:
stats.append(["Commissions", round(agg_performance.commission_amounts.sum(), 2)])
stats.append(["Start Date", agg_performance.returns.index.min().date().isoformat()])
stats.append(["End Date", agg_performance.returns.index.max().date().isoformat()])
stats.append(['Total Months', round(
(agg_performance.returns.index.max() - agg_performance.returns.index.min()) / pd.Timedelta(365.25/12, 'D'))])
stats.append(["", " Risk and Returns"])
stats.append(["CAGR", "{0}%".format(round(agg_performance.cagr * 100, 1))])
stats.append([
"Sharpe Ratio",
'%.2f' % agg_performance.sharpe])
stats.append([
"Max Drawdown",
"{0}%".format(round(agg_performance.max_drawdown * 100, 1))])
stats.append([
"Cumulative Return",
"{0}%".format(round(ep.cum_returns_final(agg_performance.returns) * 100, 1))])
stats.append([
"Annual Volatility",
"{0}%".format(round(ep.annual_volatility(agg_performance.returns) * 100, 1))])
stats.append([
"Sortino Ratio",
'%.2f' % ep.sortino_ratio(agg_performance.returns)])
stats.append([
"Calmar Ratio",
'%.2f' % ep.calmar_ratio(agg_performance.returns)])
stats.append([
"Skew",
'%.2f' % scipy.stats.skew(agg_performance.returns)])
stats.append([
"Kurtosis",
'%.2f' % scipy.stats.kurtosis(agg_performance.returns)])
if any([field is not None for field in (
agg_performance.abs_exposures,
agg_performance.net_exposures,
agg_performance.total_holdings,
agg_performance.turnover
)]):
stats.append(["", " Positions and Exposure"])
if agg_performance.abs_exposures is not None:
avg_abs_exposures = agg_performance.abs_exposures.mean()
stats.append([
"Absolute Exposure (percentage of capital)",
"{0}%".format(round(avg_abs_exposures * 100, 1))])
if agg_performance.net_exposures is not None:
avg_net_exposures = agg_performance.net_exposures.mean()
stats.append([
"Net Exposure (percentage of capital)",
"{0}%".format(round(avg_net_exposures * 100, 1))])
if agg_performance.total_holdings is not None:
avg_daily_holdings = agg_performance.total_holdings.mean()
stats.append([
"Average Daily Holdings",
round(avg_daily_holdings)])
if agg_performance.turnover is not None:
avg_daily_turnover = agg_performance.turnover.mean()
stats.append([
"Average Daily Turnover (percentage of capital)",
"{0}%".format(round(avg_daily_turnover * 100, 1))])
if agg_performance.abs_exposures is not None:
norm_cagr = agg_performance.cagr / avg_abs_exposures
stats.append([
"Normalized CAGR (CAGR/Absolute Exposure)",
"{0}%".format(round(norm_cagr * 100, 1))])
with sns.axes_style("white"):
fig = plt.figure("Performance Summary", figsize=(6,6))
axis = fig.add_subplot(111)
axis.axis("off")
headings, values = zip(*stats)
table = axis.table(
cellText=[[v] for v in values],
rowLabels=headings,
colLabels=["Performance Summary"],
loc="center"
)
for (row, col), cell in table.get_celld().items():
txt = cell.get_text().get_text()
if row == 0 or txt.startswith(" "):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
table.scale(1, 2)
table.set_fontsize("large")
def _create_constituents_tearsheet(self, performance):
"""
Create a tear sheet of the strategies or symbols in the data.
"""
with sns.axes_style("white"):
fig = plt.figure("Strategies or Securities", figsize=(6,6))
axis = fig.add_subplot(111)
axis.axis("off")
cols = list(performance.returns.columns)
if len(cols) > 58:
hidden_cols = len(cols) - 58
cols = cols[0:58]
cols.append("and {0} more".format(hidden_cols))
cells_per_row = 4
cells = ["Included columns:"] + cols
num_cells = len(cells)
if num_cells > cells_per_row and num_cells % cells_per_row != 0:
# Cells must be divisible by cells_per_row for matplotlib table
extra_cells_required = cells_per_row - num_cells % cells_per_row
for _ in range(extra_cells_required):
cells.append("")
table = axis.table(
cellText=[cells[i:i + cells_per_row] for i in range(0, len(cells), cells_per_row)],
loc="top"
)
for (row, col), cell in table.get_celld().items():
if (row == 0) and (col == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
table.scale(2, 2)
table.set_fontsize("large")
def create_returns_tearsheet(self, performance, agg_performance=None):
"""
Create a tear sheet of returns-related plots.
The included plots depend on what is present in the performance data.
Always plots cumulative returns, drawdowns, and rolling Sharpe. Plots
cumulative returns vs benchmark if benchmark is present. Plots
cumulative PNL if PNL is present. For multi-column performance
data (multi-strategy or detailed single-strategy), plots bar
charts of Sharpe, CAGR, and PNL if present.
Parameters
----------
performance : DailyPerformance, required
a DailyPerformance instance
agg_performance : AggregateDailyPerformance, optional
an AggregateDailyPerformance instance. Constructed from performance
if not provided.
Returns
-------
None
Examples
--------
>>> from moonchart import DailyPerformance, Tearsheet
>>> perf = DailyPerformance.from_moonshot_csv("backtest_results.csv")
>>> t = Tearsheet()
>>> t.create_returns_tearsheet(perf)
"""
if agg_performance is None:
agg_performance = AggregateDailyPerformance(performance)
num_cols = len(performance.returns.columns)
show_details = num_cols > 1 and num_cols <= self.max_cols_for_details
width, height = self.figsize
# cut height in half if not showing details
if not show_details:
height /= 2
self._create_returns_plots(
agg_performance,
subplot=211 if show_details else 111,
extra_label="(Aggregate)" if show_details else "",
figsize=(width, height))
if show_details:
self._create_returns_plots(performance, subplot=212, extra_label="(Details)")
self._create_detailed_returns_bar_charts(performance)
def _create_detailed_returns_bar_charts(self, performance):
fig = plt.figure("Returns (Details)", figsize=self.figsize)
color_palette = sns.color_palette()
num_series = len(performance.cum_returns.columns)
if num_series > 6:
color_palette = sns.color_palette("hls", num_series)
with sns.color_palette(color_palette):
axis = fig.add_subplot(2,2,1)
axis.set_ylabel("CAGR")
self._y_format_as_percentage(axis)
cagr = performance.cagr.copy()
cagr.index = cagr.index.astype(str).str.wrap(10)
cagr.plot(ax=axis, kind="bar", title="CAGR (Details)")
axis = fig.add_subplot(2,2,2)
self._y_format_at_least_two_decimal_places(axis)
axis.set_ylabel("Sharpe ratio")
sharpe = performance.sharpe.copy()
sharpe.index = sharpe.index.astype(str).str.wrap(10)
sharpe.plot(ax=axis, kind="bar", title="Sharpe (Details)")
axis = fig.add_subplot(2,2,3)
axis.set_ylabel("Drawdown")
self._y_format_as_percentage(axis)
max_drawdowns = performance.max_drawdown.copy()
max_drawdowns.index = max_drawdowns.index.astype(str).str.wrap(10)
max_drawdowns.plot(ax=axis, kind="bar", title="Max drawdown (Details)")
fig.tight_layout()
if performance.pnl is not None:
fig = plt.figure("PNL (Details)", figsize=self.figsize)
axis = fig.add_subplot(111)
axis.set_ylabel("PNL")
pnl = performance.pnl.sum()
if performance.commission_amounts is not None:
pnl.name = "pnl"
commission_amounts = performance.commission_amounts.sum()
commission_amounts.name = "commissions"
gross_pnl = pnl + commission_amounts
gross_pnl.name = "gross pnl"
try:
pnl = pd.concat((pnl, gross_pnl, commission_amounts), axis=1, sort=True)
except TypeError:
# sort was introduced in pandas 0.23
pnl = pd.concat((pnl, gross_pnl, commission_amounts), axis=1)
pnl.plot(
ax=axis, kind="bar", title="PNL (Details)")
def create_positions_tearsheet(self, performance, agg_performance=None):
"""
Create a tear sheet of position-related plots.
Includes plots of net and absolute daily exposure, number of daily
holdings, and daily turnover.
Parameters
----------
performance : DailyPerformance, required
a DailyPerformance instance
agg_performance : AggregateDailyPerformance, optional
an AggregateDailyPerformance instance. Constructed from performance
if not provided.
Returns
-------
None
Examples
--------
>>> from moonchart import DailyPerformance, Tearsheet
>>> perf = DailyPerformance.from_moonshot_csv("backtest_results.csv")
>>> t = Tearsheet()
>>> t.create_positions_tearsheet(perf)
"""
if agg_performance is None:
agg_performance = AggregateDailyPerformance(performance)
num_cols = len(performance.returns.columns)
show_details = num_cols > 1 and num_cols <= self.max_cols_for_details
width, height = self.figsize
# cut height in half if not showing details
if not show_details:
height /= 2
self._create_positions_plots(
agg_performance,
subplot=211 if show_details else 111,
extra_label="(Aggregate)" if show_details else "",
figsize=(width, height))
if show_details:
self._create_positions_plots(performance, subplot=212, extra_label="(Details)")
self._create_detailed_positions_bar_charts(performance)
def _create_positions_plots(self, performance, subplot, extra_label, figsize=None):
figsize = figsize or self.figsize
color_palette = sns.color_palette()
if isinstance(performance.returns, pd.DataFrame):
num_series = len(performance.returns.columns)
if num_series > 6:
color_palette = sns.color_palette("hls", num_series)
with sns.color_palette(color_palette):
if performance.abs_exposures is not None:
fig = plt.figure("Absolute Exposure", figsize=figsize)
axis = fig.add_subplot(subplot)
self._y_format_as_percentage(axis)
plot = performance.abs_exposures.round(2).plot(ax=axis, title="Absolute Exposure {0}".format(extra_label))
axis.set_ylabel("Percentage of capital")
axis.set_xlabel("")
if isinstance(performance.abs_exposures, pd.DataFrame):
self._clear_legend(plot)
if performance.net_exposures is not None:
fig = plt.figure("Net Exposure", figsize=figsize)
axis = fig.add_subplot(subplot)
self._y_format_as_percentage(axis)
plot = performance.net_exposures.round(2).plot(ax=axis, title="Net Exposure {0}".format(extra_label))
axis.set_ylabel("Percentage of capital")
axis.set_xlabel("")
if isinstance(performance.net_exposures, pd.DataFrame):
self._clear_legend(plot)
if performance.total_holdings is not None:
fig = plt.figure("Daily Holdings", figsize=figsize)
axis = fig.add_subplot(subplot)
plot = performance.total_holdings.plot(ax=axis, title="Daily Holdings {0}".format(extra_label))
axis.set_ylabel("Number of holdings")
axis.set_xlabel("")
if isinstance(performance.total_holdings, pd.DataFrame):
self._clear_legend(plot)
if performance.turnover is not None:
fig = plt.figure("Daily Turnover", figsize=figsize)
axis = fig.add_subplot(subplot)
self._y_format_as_percentage(axis)
turnover = performance.turnover
plot = turnover.plot(ax=axis, title="Daily Turnover {0}".format(extra_label))
axis.set_ylabel("Percentage of capital")
axis.set_xlabel("")
if isinstance(turnover, pd.DataFrame):
self._clear_legend(plot)
def _create_detailed_positions_bar_charts(self, performance):
# extend figsize due to 3 rows
width, height = self.figsize
figsize = width, height*1.5
fig = plt.figure("Positions (Details)", figsize=figsize)
color_palette = sns.color_palette()
num_series = len(performance.cum_returns.columns)
if num_series > 6:
color_palette = sns.color_palette("hls", num_series)
total_plots = sum([1 for field in (
performance.abs_exposures,
performance.net_exposures,
performance.total_holdings,
performance.turnover,
performance.abs_exposures) if field is not None])
rows = math.ceil(total_plots/2)
with sns.color_palette(color_palette):
next_pos = 1
if performance.abs_exposures is not None:
avg_abs_exposures = performance.abs_exposures.mean()
axis = fig.add_subplot(rows,2,next_pos)
next_pos += 1
self._y_format_as_percentage(axis)
avg_abs_exposures.plot(ax=axis, kind="bar", title="Avg Absolute Exposure (Details)")
axis.set_ylabel("Percentage of capital")
if performance.net_exposures is not None:
avg_net_exposures = performance.net_exposures.mean()
axis = fig.add_subplot(rows,2,next_pos)
next_pos += 1
self._y_format_as_percentage(axis)
avg_net_exposures.plot(ax=axis, kind="bar", title="Avg Net Exposure (Details)")
axis.set_ylabel("Percentage of capital")
if performance.total_holdings is not None:
avg_total_holdings = performance.total_holdings.mean()
axis = fig.add_subplot(rows,2,next_pos)
next_pos += 1
avg_total_holdings.plot(ax=axis, kind="bar", title="Avg Daily Holdings (Details)")
axis.set_ylabel("Number of holdings")
if performance.turnover is not None:
avg_daily_turnover = performance.turnover.mean()
axis = fig.add_subplot(rows,2,next_pos)
next_pos += 1
self._y_format_as_percentage(axis)
avg_daily_turnover.plot(ax=axis, kind="bar", title="Avg Daily Turnover (Details)")
axis.set_ylabel("Percentage of capital")
if performance.abs_exposures is not None:
norm_cagrs = performance.cagr / avg_abs_exposures
axis = fig.add_subplot(rows,2,next_pos)
next_pos += 1
self._y_format_as_percentage(axis)
norm_cagrs.plot(ax=axis, kind="bar", title="Normalized CAGR (CAGR/Absolute Exposure) (Details)")
axis.set_ylabel("Normalized CAGR")
fig.tight_layout()
def create_returns_by_year_tearsheet(self, performance, agg_performance=None):
"""
Plots bar charts showing CAGR and Sharpe by year.
Parameters
----------
performance : DailyPerformance, required
a DailyPerformance instance
agg_performance : AggregateDailyPerformance, optional
an AggregateDailyPerformance instance. Constructed from performance
if not provided.
Returns
-------
None
Examples
--------
>>> from moonchart import DailyPerformance, Tearsheet
>>> perf = DailyPerformance.from_moonshot_csv("backtest_results.csv")
>>> t = Tearsheet()
>>> t.create_returns_by_year_tearsheet(perf)
"""
if agg_performance is None:
agg_performance = AggregateDailyPerformance(performance)
num_cols = len(performance.returns.columns)
show_details = num_cols > 1 and num_cols <= self.max_cols_for_details
width, height = self.figsize
# cut height in half if not showing details
if not show_details:
height /= 2
fig = plt.figure("Returns by Year", figsize=(width, height))
self._create_returns_by_year_plots(
agg_performance,
rows=2 if show_details else 1,
row=1,
fig=fig,
extra_label="(Aggregate)" if show_details else "")
if show_details:
self._create_returns_by_year_plots(performance, rows=2, row=2, fig=fig, extra_label="(Details)")
def _create_returns_by_year_plots(self, performance, rows, row, fig, extra_label):
color_palette = sns.color_palette()
if isinstance(performance.returns, pd.DataFrame):
num_series = len(performance.cum_returns.columns)
if num_series > 6:
color_palette = sns.color_palette("hls", num_series)
else:
color_palette = sns.color_palette()[0:1]
grouped_returns = performance.returns.groupby(performance.returns.index.year)
cagrs_by_year = grouped_returns.apply(lambda x: get_cagr(
get_cum_returns(x, compound=performance.compound),
compound=performance.compound))
sharpes_by_year = grouped_returns.apply(get_sharpe, riskfree=performance.riskfree)
cols = 2
# 2 cols per row, minus 1, gives the start position
start_at = 2 * row - 1
with sns.color_palette(color_palette):
axis = fig.add_subplot(rows, 2, start_at)
axis.set_ylabel("CAGR")
self._y_format_as_percentage(axis)
plot = cagrs_by_year.plot(ax=axis, kind="bar", title="CAGR by Year {0}".format(extra_label))
axis.set_xlabel("")
if isinstance(cagrs_by_year, pd.DataFrame):
# Remove legend, rely on legend from Sharpe plot
plot.legend_.remove()
axis = fig.add_subplot(rows, 2, start_at+1)
axis.set_ylabel("Sharpe ratio")
self._y_format_at_least_two_decimal_places(axis)
plot = sharpes_by_year.plot(ax=axis, kind="bar", title="Sharpe by Year {0}".format(extra_label))
axis.set_xlabel("")
if isinstance(sharpes_by_year, pd.DataFrame):
self._clear_legend(plot)
fig.tight_layout()
def create_montecarlo_tearsheet(self, performance, cycles=5, aggregate_before_shuffle=True):
"""
Run a Montecarlo simulation by shuffling the DataFrame of returns a specified
number of times and plotting the shuffled returns against the original returns.
Parameters
----------
performance : DailyPerformance, required
a DailyPerformance instance
cycles : int, optional
the number of Montecarlo simulations (default 5)
aggregate_before_shuffle : bool
whether to aggregate daily returns before or after shuffling. Only relevant to
multi-column (that is, multi-strategy or detailed single-strategy) DataFrames.
If True, aggregated daily returns are preserved and only the order of days is
randomized. If False, each column's returns are shuffled separately, without
preservation of daily aggregations. False is more random. True may be preferable
if daily returns across columns are expected to be correlated. Default True.
Returns
-------
None
Examples
--------
>>> from moonchart import DailyPerformance, Tearsheet
>>> perf = DailyPerformance.from_moonshot_csv("backtest_results.csv")
>>> t = Tearsheet()
>>> t.create_montecarlo_tearsheet(perf, cycles=10)
"""
all_simulations = []
returns = performance.returns
if aggregate_before_shuffle:
returns = returns.sum(axis=1)
for i in range(cycles):
if aggregate_before_shuffle:
sim_returns = pd.Series(np.random.permutation(returns), index=returns.index)
else:
sim_returns = returns.apply(np.random.permutation).sum(axis=1)
all_simulations.append(sim_returns)
try:
            sim_returns = pd.concat(all_simulations, axis=1, sort=False)
        except TypeError:
            # sort was introduced in pandas 0.23
            sim_returns = pd.concat(all_simulations, axis=1)
#!/usr/bin/python3
import datetime
import pandas as pd
import re
import requests
import lxml
from lxml import etree
from dev_global.env import TIME_FMT
from libmysql_utils.mysql8 import (mysqlBase, mysqlHeader)
from mars.utils import trans
from mars.log_manager import error_log
from requests.models import HTTPError
from venus.form import formStockManager
__version__ = '1.0.10'
class StockBase(mysqlBase):
"""
param header: mysqlHeader
"""
def __init__(self, header):
if not isinstance(header, mysqlHeader):
raise HeaderException("Error due to incorrect header.")
super(StockBase, self).__init__(header)
# date format: YYYY-mm-dd
self._Today = datetime.date.today().strftime(TIME_FMT)
# date format: YYYYmmdd
self.today = datetime.date.today().strftime('%Y%m%d')
# self.TAB_STOCK_MANAGER = "stock_manager"
@property
def Today(self) -> str:
self._Today = datetime.date.today().strftime(TIME_FMT)
return self._Today
def get_all_stock_list(self) -> list:
"""
Return stock code --> list.
"""
query_stock_code = self.session.query(formStockManager.stock_code).filter_by(flag='t')
df = pd.DataFrame.from_dict(query_stock_code)
stock_list = df['stock_code'].tolist()
# should test if stock list is null
return stock_list
def get_all_index_list(self):
"""
Return stock code --> list.
"""
query_stock_code = self.session.query(formStockManager.stock_code).filter_by(flag='i')
df = pd.DataFrame.from_dict(query_stock_code)
stock_list = df['stock_code'].tolist()
return stock_list
def get_all_security_list(self):
"""
Return stock code --> list
"""
# Return all kinds of securities in form stock list.
# Result : List type data.
query_stock_code = self.session.query(formStockManager.stock_code).all()
df = pd.DataFrame.from_dict(query_stock_code)
stock_list = df['stock_code'].tolist()
return stock_list
@staticmethod
def get_html_object(url: str, HttpHeader=None):
"""
        Return an etree.HTML object parsed from the given url.
        """
        response = requests.get(url, headers=HttpHeader, timeout=3)
if response.status_code == 200:
# setting encoding
response.encoding = response.apparent_encoding
html = lxml.etree.HTML(response.text)
else:
html = None
raise HTTPError(f"Status code: {response.status_code} for {url}")
return html
class HeaderException(BaseException):
pass
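
# Minimal usage sketch of StockBase (illustrative only; the exact mysqlHeader
# signature comes from libmysql_utils.mysql8 and the values below are placeholders):
#
#   header = mysqlHeader('reader', 'password', 'stock_db')   # argument order assumed
#   base = StockBase(header)
#   tradable = base.get_all_stock_list()      # e.g. ['600000.SH', '000001.SZ', ...]
#   html = StockBase.get_html_object('http://example.com/quote')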
class StockEventBase(object):
"""
Today: date format like yyyy-mm-dd \n
today: date format like yyyymmdd
"""
def __init__(self, header):
self.Today = datetime.date.today().strftime(TIME_FMT)
self.today = datetime.date.today().strftime('%Y%m%d')
if not header:
raise Exception
self.mysql = mysqlBase(header)
self.stock_list = []
self.coder = StockCodeFormat()
def __str__(self):
return "<Stock Event Base>"
def data_clean(self, df):
for index, col in df.iteritems():
try:
if re.search('date', index):
df[index] = pd.to_datetime(df[index])
elif re.search('int', index):
df[index] = pd.to_numeric(df[index])
elif re.search('float', index):
df[index] = pd.to_numeric(df[index])
elif re.search('char', index):
pass
else:
pass
except Exception:
                error_log(
                    f"Error while cleaning column {index}")
return df
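
    # Note on the convention data_clean assumes: column names encode their dtype,
    # e.g. ['char_stock_code', 'date_trade', 'int_volume', 'float_close']. The
    # date_* column is parsed to datetime, int_*/float_* columns are coerced to
    # numeric, and char_* columns are left unchanged (names here are illustrative).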
def update_date_time(self):
"""
Get date of today.
"""
self.Today = datetime.date.today().strftime(TIME_FMT)
def get_all_stock_list(self):
"""
Return stock code --> list.
"""
query = self.mysql.condition_select(
"stock_manager", "stock_code", "flag='t'"
)
df = pd.DataFrame.from_dict(query)
self.stock_list = df['stock_code'].tolist()
return self.stock_list
def get_all_index_list(self):
"""
Return stock code --> list.
"""
query = self.mysql.condition_select(
"stock_manager", "stock_code", "flag='i'"
)
df = pd.DataFrame.from_dict(query)
self.stock_list = df['stock_code'].tolist()
return self.stock_list
def get_all_security_list(self):
"""
Return stock code --> list
"""
# Return all kinds of securities in form stock list.
# Result : List type data.
from venus.form import formStockManager
result = self.mysql.session.query(
formStockManager.stock_code).all()
df = pd.DataFrame.from_dict(result)
result = df['stock_code'].tolist()
return result
def get_html_object(self, url, header=None):
"""
        Return an etree.HTML object parsed from the given url.
        """
        content = requests.get(url, headers=header, timeout=3)
content.encoding = content.apparent_encoding
html = etree.HTML(content.text)
return html
def get_excel_object(self, url):
df = pd.read_excel(url)
return df
def get_html_table(self, url, attr=''):
# get html table from url.
# Return a string like table object.
# attr: [@class='table_bg001 border_box limit_scale scr_table']
# //table[contains(@id,'historyTable')]
html = self.get_html_object(url)
table_list = html.xpath(f"//table{attr}")
result = []
if table_list:
for table in table_list:
df = etree.tostring(table).decode()
result.append(df)
return result
def update_stock_manager(self, stock_code: str, option='update'):
if option == 'update':
col = 'modified_date'
elif option == 'xrdr':
col = 'xrdr_date'
elif option == 'balance':
col = 'balance_date'
elif option == 'income':
col = 'income_date'
elif option == 'cashflow':
col = 'cashflow_date'
else:
col = None
if col:
self.mysql.update_value(
'stock_manager', col,
f"'{self.Today}'", f"stock_code='{stock_code}'")
def close(self):
self.mysql.engine.close()
class EventStockList(StockEventBase):
def get_all_stock_list(self):
"""
Return stock code --> list.
"""
query = self.mysql.condition_select(
"stock_manager", "stock_code", "flag='t'"
)
df = pd.DataFrame.from_dict(query)
self.stock_list = df[0].tolist()
return self.stock_list
def get_all_index_list(self):
"""
Return stock code --> list.
"""
query = self.mysql.condition_select(
"stock_manager", "stock_code", "flag='i'"
)
df = pd.DataFrame.from_dict(query)
self.stock_list = df[0].tolist()
return self.stock_list
def get_all_security_list(self):
"""
Return stock code --> list
"""
# Return all kinds of securities in form stock list.
# Result : List type data.
from venus.form import formStockManager
result = self.mysql.session.query(
formStockManager.stock_code).all()
        df = pd.DataFrame.from_dict(result)
        result = df['stock_code'].tolist()
        return result
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
_formats = ['%Y年', '%Y年%m月%d日', '%Y年%m月',
'%Y年%m月%d日%H時%M分', '%Y年%m月%d日%H時%M分%S秒',
'%y年', '%y年%m月%d日', '%y年%m月',
'%y年%m月%d日%H時%M分', '%y年%m月%d日%H時%M分%S秒',
'%m月%d日', '%m月%d日%H時%M分', '%m月%d日%H時%M分%S秒']
def to_datetime(arg, box=True, format=None, **kwargs):
try:
result = pd.to_datetime(arg, box=box, format=format, **kwargs)
if format is not None:
# if format is specified, return pd.to_datetime as it is
return result
if result is None:
return result
elif isinstance(result, (pd.Timestamp, pd.DatetimeIndex)):
return result
except ValueError:
# as of pandas 0.17, to_datetime raises when parsing fails
result = arg
def _convert_listlike(arg, box):
for format in _formats:
try:
return pd.to_datetime(arg, box=box, format=format, **kwargs)
except ValueError:
pass
return arg
if isinstance(result, compat.string_types):
arg = np.array([arg], dtype='O')
result = _convert_listlike(arg, box)
return result[0]
if isinstance(result, pd.Series):
values = _convert_listlike(arg.values, False)
return pd.Series(values, index=arg.index, name=arg.name)
elif com.is_list_like(result):
return _convert_listlike(result, box)
return result
def date_range(start=None, end=None, **kwargs):
start = to_datetime(start)
end = to_datetime(end)
    return pd.date_range(start=start, end=end, **kwargs)
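

if __name__ == '__main__':
    # Minimal usage sketch (illustrative dates): strings pandas already understands
    # pass straight through, while Japanese-formatted strings fall back to _formats.
    print(to_datetime('2014-10-31'))       # Timestamp('2014-10-31 00:00:00')
    print(to_datetime('2014年10月31日'))    # parsed via the '%Y年%m月%d日' pattern
    print(date_range(start='2014年1月1日', end='2014年1月5日', freq='D'))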
# Team: Darvirian
# Developer: <NAME>
# Contents:
# PART I: Load the data
# PART II: Preprocessing
# PART III: Tokenize in sentences and words
# PART IV: Vectorize (and calculate TF-IDF)
# PART V: Create the worddic with per word: doc, positions in doc, TF-IDF score
# CASES: Kaggle CORD-19 What do we know about virus genetics, origin, and evolution?
# CASES: EUvsVirus Health & Life, Research
# Credits:
# Inspiration: https://www.kaggle.com/amitkumarjaiswal/nlp-search-engine
# CORD-19 CSV files from: https://www.kaggle.com/xhlulu/cord-19-eda-parse-json-and-generate-clean-csv
# =============================================================================
# Import the libraries
# =============================================================================
# TODO also full-genome (fullgenome is in but full-genome not)
import numpy as np
import pandas as pd
import pickle
import re
import json
import time
import nltk
from nltk import word_tokenize
from nltk import sent_tokenize
from nltk.stem import WordNetLemmatizer
from collections import Counter
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer
# =============================================================================
# PART I: Load the data
# =============================================================================
## Read docs from CORD-19
# import os
# os.chdir("../Data/CSV")
df_biorxiv = pd.read_csv('/Users/henrybol/Documents/GitHub_off/darvirian/Data/CSV/biorxiv_clean.csv')
df_clean_comm_use = pd.read_csv('/Users/henrybol/Documents/GitHub_off/darvirian/Data/CSV/clean_comm_use.csv')
df_clean_noncomm_use = pd.read_csv('/Users/henrybol/Documents/GitHub_off/darvirian/Data/CSV/clean_noncomm_use.csv')
df_clean_pmc = pd.read_csv('/Users/henrybol/Documents/GitHub_off/darvirian/Data/CSV/clean_pmc.csv')
# Add all dataframes together
df = df_biorxiv.append(df_clean_comm_use).reset_index(drop=True)
df = df.append(df_clean_noncomm_use).reset_index(drop=True)
df = df.append(df_clean_pmc).reset_index(drop=True)
# Select dataset (test purposes)
# df = df_biorxiv.copy()
## Save df file
f = open("df.pkl","wb")
pickle.dump(df, f)
f.close()
## Series plot_data with text (all documents)
plot_data = df['text']
# for i in range(len(plot_data)):
# if 'SARS-CoV-2' in plot_data[i]:
# # if 'Hal' in plot_data[i]:
# print(i)
# "SARS-CoV-2" in plot_data[10]
# check = sentences[10]
# check = ''.join(item for item in check)
# TODO check documents in other languages than English (e.g. German)
# Create Documentnumber to PaperID table
# doc_to_paper_id = df.paper_id.reset_index()
# doc_to_paper_id.to_csv('Data/output/doc_to_paper.csv')
# Slice for short df
# df.columns
# df = df[['paper_id', 'title', 'authors', 'affiliations', 'abstract', 'bibliography']]
# # df.to_csv('Data/output/df.csv')
# # df = pd.read_csv('Data/output/df.csv')
# f = open("Data/output/df.pkl","wb")
# pickle.dump(df, f)
# f.close()
# =============================================================================
# PART II: Preprocessing
# =============================================================================
# Check NaNs
# df.isnull().values.any()
# df.isna().any() # title, authors, affiliations, abstract
# NaN_list_rows = df.isnull().sum(axis=1).sort_values(ascending=False)
# df = df.replace(np.nan, '', regex=True)
# plot_data.isnull().values.any() # False
## Check duplicates
duplicate_papers = df[df.paper_id.duplicated()] # None
## Create all docs with sentences tokenized
sentences = [sent_tokenize(plot_data[i]) for i in range(len(plot_data)) if len(plot_data[i]) != 0]
## Save sentences file
# f = open("Data/output/sentences_200426-2.pkl","wb")
# pickle.dump(sentences, f)
# f.close()
# Load pickle file sentences
# if inference == 'on':
# pickle_in = open("Data/output/sentences_200415.pkl", "rb")
# sentences = pickle.load(pickle_in)
# Dump to json file
with open('sentences.json', 'w') as json_file:
json.dump(sentences, json_file)
## Replace '\n' by ' '
plot_data = [x.replace('\n', ' ') for x in plot_data]
# TODO include '-'
# rank('Full-genome phylogenetic analysis'): full-genome is not taken into account
# rank('Full genome phylogenetic analysis'): full genome is taken into account
# Replace SARS-CoV-2 and Covid-19
plot_data = [x.replace('SARS-CoV-2', 'sarscov2') for x in plot_data]
plot_data = [x.replace('sars-cov-2', 'sarscov2') for x in plot_data]
plot_data = [x.replace('Covid-19', 'covid19') for x in plot_data]
plot_data = [x.replace('covid-19', 'covid19') for x in plot_data]
## Clean text
# TODO CHANGE
# Keep figures, letters and hyphens (hyphen gives error in worddic function)
plot_data = [re.sub(r'[^a-zA-Z0-9]', ' ', str(x)) for x in plot_data]
# ADDED - to keep hyphen -> PROBLEMS later
# plot_data = [re.sub(r'[^a-zA-Z0-9-]', ' ', str(x)) for x in plot_data]
# Remove single characters (not 0-9 to keep SARS-CoV-2)
# plot_data = [re.sub(r'\b[a-zA-Z0-9]\b', '', str(x)) for x in plot_data]
plot_data = [re.sub(r'\b[a-zA-Z0-9]\b', '', str(x)) for x in plot_data]
# Remove punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~)
# plot_data = ["".join(j for j in i if j not in string.punctuation) for i in plot_data]
# =============================================================================
# PART III: Tokenize and preprocess more
# =============================================================================
## Tokenize words
# plot_data = [word_tokenize(doc) for doc in set(plot_data)] # Do NOT use SET here
plot_data = [word_tokenize(doc) for doc in plot_data]
## Lower case words for all docs
plot_data = [[word.lower() for word in doc] for doc in plot_data]
## Lemmatization
time_start = time.time()
lemmatizer = WordNetLemmatizer()
plot_data = [[lemmatizer.lemmatize(word) for word in doc] for doc in plot_data]
time_end = time.time()
print('Lemmatization duration:', time_end - time_start) # Lemmatization duration: 334.67016315460205
#snowball_stemmer = SnowballStemmer("english")
#stemmed_sentence = [snowball_stemmer.stem(w) for w in filtered_sentence]
#stemmed_sentence[0:10]
#
#porter_stemmer = PorterStemmer()
#snowball_stemmer = SnowballStemmer("english")
#stemmed_sentence = [porter_stemmer.stem(w) for w in filtered_sentence]
#stemmed_sentence[0:10]
## Remove stop words from all docs
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
plot_data = [[word for word in doc if word not in stop_words] for doc in plot_data]
## Remove words that occur only once in all documents
# Check frequency of words and sort from high to low
num_of_words = Counter(word for doc in plot_data for word in set(doc))
# num_of_words_sorted = OrderedDict(num_of_words.most_common())
num_of_words_sorted = [(l,k) for k,l in sorted([(j,i) for i,j in num_of_words.items()], reverse=True)]
# All words with a frequency of 1 (word[0] is a word and word[1] the frequency)
words_low_freq = [word[0] for word in num_of_words_sorted if word[1] == 1]
# Set to increase speed
words_low_freq = set(words_low_freq)
# Remove words with a frequency of 1 (this takes a while) = this takes too much time
# plot_data = [[word for word in doc if word not in words_low_freq] for doc in plot_data]
plot_data = [[word for word in doc if word not in words_low_freq] for doc in plot_data]
# all_words = [item for sublist in plot_data for item in sublist]
# wordsunique = set(all_words)
# wordsunique = list(wordsunique)
# len(wordsunique)
## Save plot_data file
# f = open("Data/output/plot_data_200419.pkl", "wb")
# pickle.dump(plot_data, f)
# f.close()
## Load pickle file plot_data
# if inference == 'on':
# pickle_in = open("Data/output/plot_data_200415.pkl", "rb")
# plot_data = pickle.load(pickle_in)
# "SARS-CoV-2" in plot_data[10]
# "sars-cov-2" in plot_data[10]
# "sars-cov-2" in texts_flattened[10]
# word2idx["sars-cov-2"]
# =============================================================================
# PART IV: Vectorize (and calculate TF-IDF)
# ============================================================================
texts_flattened = [" ".join(x) for x in plot_data]
# vectorizer = TfidfVectorizer(lowercase=True, analyzer='word', stop_words='english')
# Include with token_pattern also single characters but keep hyphenated
vectorizer = TfidfVectorizer(lowercase=False, stop_words=None, token_pattern=r"(?u)\b\w+\b")
# pattern = "(?u)\\b[\\w-]+\\b"
# vectorizer = TfidfVectorizer(lowercase=False, stop_words=None, token_pattern=pattern)
# vectorizer = TfidfVectorizer(lowercase=False, stop_words=None)
vectors = vectorizer.fit_transform(texts_flattened)
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
## Dictionary of unique words as values
word2idx = dict(zip(feature_names, range(len(feature_names))))
# word2idx['sars']
# Save word2idx file
# f = open("Data/output/word2idx_200426-2.pkl", "wb")
# pickle.dump(word2idx, f)
# f.close()
# Load pickle file word2idx
# if inference == 'on':
# pickle_in = open("Data/output/word2idx_200426-2.pkl", "rb")
# word2idx = pickle.load(pickle_in)
# Dump to json file
with open('word2idx.json', 'w') as json_file:
json.dump(word2idx, json_file)
## Dictionary with the unique words as keys
idx2word = {v:k for k,v in word2idx.items()}
## Save idx2word file
# f = open("Data/output/idx2word_200426-2.pkl", "wb")
# pickle.dump(idx2word, f)
# f.close()
# Load pickle file idx2word
# if inference == 'on':
# pickle_in = open("Data/output/idx2word_200415.pkl", "rb")
# idx2word = pickle.load(pickle_in)
# Dump to json file
with open('idx2word.json', 'w') as json_file:
json.dump(idx2word, json_file)
## word2idx all feature_names
feature_names_num = [word2idx[feature] for feature in feature_names]
## dataframe tfidf
df_tfidf = pd.DataFrame(dense, columns=feature_names_num)
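
# Quick sanity check (illustrative, not part of the pipeline itself): look up the
# TF-IDF scores of a single term across all documents via word2idx and df_tfidf.
# The example term is an assumption; any token that survived preprocessing works.
example_word = 'coronavirus'
if example_word in word2idx:
    word_id = word2idx[example_word]
    scores = df_tfidf[word_id]             # one column per vocabulary term
    best_doc = scores.idxmax()
    print(f"'{example_word}' scores highest in doc {best_doc}: {scores.max():.4f}")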
from copy import deepcopy
import datetime
import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas._testing as tm
class TestDataFrameMisc:
@pytest.mark.parametrize("attr", ["index", "columns"])
def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
expected = pd.Index([f"foo#{c}" for c in float_frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = float_frame.add_suffix("#foo")
expected = pd.Index([f"{c}#foo" for c in float_frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = float_frame.add_prefix("%")
expected = pd.Index([f"%{c}" for c in float_frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = float_frame.add_suffix("%")
expected = pd.Index([f"{c}%" for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_keys(self, float_frame):
getkeys = float_frame.keys
assert getkeys() is float_frame.columns
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), pd.Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = pd.DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
colname = "\ud83d"
df = DataFrame({colname: []})
# this should not crash
assert colname not in dir(df)
assert df.columns[0] == colname
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_array_interface(self, float_frame):
with np.errstate(all="ignore"):
result = np.sqrt(float_frame)
assert isinstance(result, type(float_frame))
assert result.index is float_frame.index
assert result.columns is float_frame.columns
tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
assert isinstance(v, DataFrame._constructor_sliced)
def test_items(self):
# GH 17213, GH 13918
cols = ["a", "b", "c"]
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_iter(self, float_frame):
assert tm.equalContents(list(float_frame), float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH 19671
s = DataFrame(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
}
)
for k, v in s.iterrows():
exp = s.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_corner(self):
# gh-12222
df = DataFrame(
{
"a": [datetime.datetime(2015, 1, 1)],
"b": [None],
"c": [None],
"d": [""],
"e": [[]],
"f": [set()],
"g": [{}],
}
)
expected = Series(
[datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
index=list("abcdefg"),
name=0,
dtype="object",
)
_, result = next(df.iterrows())
tm.assert_series_equal(result, expected)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = DataFrame._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
tm.assert_series_equal(s, expected)
df = DataFrame(
{"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
)
for tup in df.itertuples(index=False):
assert isinstance(tup[1], int)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[["a", "a"]]
assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
# repr with int on 32-bit/windows
if not (compat.is_platform_windows() or compat.is_platform_32bit()):
assert (
repr(list(df.itertuples(name=None)))
== "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]"
)
tup = next(df.itertuples(name="TestName"))
assert tup._fields == ("Index", "a", "b")
assert (tup.Index, tup.a, tup.b) == tup
assert type(tup).__name__ == "TestName"
df.columns = ["def", "return"]
tup2 = next(df.itertuples(name="TestName"))
assert tup2 == (0, 1, 4)
assert tup2._fields == ("Index", "_1", "_2")
df3 = DataFrame({"f" + str(i): [i] for i in range(1024)})
# will raise SyntaxError if trying to create namedtuple
tup3 = next(df3.itertuples())
assert isinstance(tup3, tuple)
if PY37:
assert hasattr(tup3, "_fields")
else:
assert not hasattr(tup3, "_fields")
# GH 28282
df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}])
result_254_columns = next(df_254_columns.itertuples(index=False))
assert isinstance(result_254_columns, tuple)
assert hasattr(result_254_columns, "_fields")
df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}])
result_255_columns = next(df_255_columns.itertuples(index=False))
assert isinstance(result_255_columns, tuple)
        # Dataframes with >=255 columns will fall back to regular tuples on python < 3.7
if PY37:
assert hasattr(result_255_columns, "_fields")
else:
assert not hasattr(result_255_columns, "_fields")
def test_sequence_like_with_categorical(self):
# GH 7839
# make sure can iterate
df = DataFrame(
{"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
)
df["grade"] = Categorical(df["raw_grade"])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.items():
str(s)
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
def test_values_mixed_dtypes(self, float_frame, float_string_frame):
frame = float_frame
arr = frame.values
frame_cols = frame.columns
for i, row in enumerate(arr):
for j, value in enumerate(row):
col = frame_cols[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
arr = float_string_frame[["foo", "A"]].values
assert arr[0, 0] == "bar"
df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
arr = df.values
assert arr[0, 0] == 1j
# single block corner case
arr = float_frame[["A", "B"]].values
expected = float_frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(arr, expected)
def test_to_numpy(self):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4.5]])
result = df.to_numpy()
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_dtype(self):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4]], dtype="int64")
result = df.to_numpy(dtype="int64")
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_copy(self):
arr = np.random.randn(4, 3)
df = pd.DataFrame(arr)
assert df.values.base is arr
assert df.to_numpy(copy=False).base is arr
assert df.to_numpy(copy=True).base is not arr
def test_to_numpy_mixed_dtype_to_str(self):
# https://github.com/pandas-dev/pandas/issues/35455
df = pd.DataFrame([[pd.Timestamp("2020-01-01 00:00:00"), 100.0]])
result = df.to_numpy(dtype=str)
expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str)
tm.assert_numpy_array_equal(result, expected)
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
tm.assert_frame_equal(df, df.swapaxes(0, 0))
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.swapaxes(2, 5)
def test_axis_aliases(self, float_frame):
f = float_frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis="index")
tm.assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis="columns")
tm.assert_series_equal(result, expected)
def test_class_axis(self):
# GH 18147
# no exception and no empty docstring
assert pydoc.getdoc(DataFrame.index)
assert pydoc.getdoc(DataFrame.columns)
def test_more_values(self, float_string_frame):
values = float_string_frame.values
assert values.shape[1] == len(float_string_frame.columns)
def test_repr_with_mi_nat(self, float_string_frame):
df = DataFrame(
{"X": [1, 2]}, index=[[pd.NaT, | pd.Timestamp("20130101") | pandas.Timestamp |
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Union tests
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_police():
pois_df = DataFrame(
data=list_random_police,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'police'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'police'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_police(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_join_colletive_areas():
move_df = MoveDataFrame(
data=list_move,
)
move_df['geometry'] = move_df.apply(lambda x: Point(x['lon'], x['lat']), axis=1)
expected = move_df.copy()
indexes_ac = np.linspace(0, move_df.shape[0], 5, dtype=int)
area_c = move_df[move_df.index.isin(indexes_ac)].copy()
integration.join_collective_areas(move_df, area_c, inplace=True)
expected[VIOLATING] = [True, False, True, False, True, False, True, False, False]
assert_frame_equal(move_df, expected)
def test__reset_and_creates_id_and_lat_lon():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, True
)
)
id_expected = np.full(9, '', dtype='object_')
tag_expected = np.full(9, '', dtype='object_')
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
lat_expected = np.full(7, np.Infinity, dtype=np.float64)
lon_expected = np.full(7, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, True
)
)
lat_expected = np.full(9, np.Infinity, dtype=np.float64)
lon_expected = np.full(9, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
def test__reset_set_window__and_creates_event_id_type():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-22T17:23:05.000000000', '2008-10-22T22:07:26.000000000',
'2008-10-22T22:20:16.000000000', '2008-10-22T22:33:06.000000000',
'2008-10-22T23:28:33.000000000', '2008-10-23T11:20:45.000000000',
'2008-10-23T11:32:14.000000000', '2008-10-23T11:52:01.000000000',
'2008-10-23T13:27:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T18:23:05.000000000', '2008-10-23T23:07:26.000000000',
'2008-10-23T23:20:16.000000000', '2008-10-23T23:33:06.000000000',
'2008-10-24T00:28:33.000000000', '2008-10-24T12:20:45.000000000',
'2008-10-24T12:32:14.000000000', '2008-10-24T12:52:01.000000000',
'2008-10-24T14:27:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
type_expected = np.full(9, '', dtype='object_')
id_expected = np.full(9, '', dtype='object_')
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window__and_creates_event_id_type(
move_df, pois, 45000, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_almost_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_reset_set_window_and_creates_event_id_type_all():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
        [40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
        # trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
        cat = pd.Categorical(["a", "b", "c"], ordered=True)
import pandas as pd
from flask import render_template, request ,url_for
#from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from textblob import Word
import pickle
import joblib
import csv
import os
from werkzeug.utils import secure_filename
from flask import Flask,flash,request,redirect,send_file,render_template
from app import app
cv = CountVectorizer()
UPLOAD_FOLDER = 'uploads/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/")
@app.route("/index")
def home():
return render_template('home.html')
@app.route('/predict', methods=['POST'])
def predict():
ngram_size = 1
nb_spam_model = open("pretrainedModel/NB_model.pkl", 'rb')
clf = joblib.load(nb_spam_model)
dictionary_filepath = open("pretrainedModel/vocabulary_model.pkl", 'rb')
vocabulary_to_load = joblib.load(dictionary_filepath)
loaded_vectorizer = CountVectorizer(ngram_range=(ngram_size, ngram_size), min_df=1, vocabulary=vocabulary_to_load)
loaded_vectorizer._validate_vocabulary()
message = request.form['message']
data = [message]
vect = loaded_vectorizer.transform(data).toarray()
my_prediction = clf.predict(vect)
#print(my_prediction)
return render_template('result.html', prediction=my_prediction)
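# Usage sketch (assumption: the app is served locally on Flask's default port 5000):
#   curl -X POST -F "message=this phone is great" http://localhost:5000/predict
# The form field name must match request.form['message'] used above.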
"""
@app.route('/predictcsvfile', methods=[ 'GET','POST'])
def predict1():
ngram_size = 1
nb_spam_model = open("pretrainedModel/NB_model.pkl", 'rb')
clf = joblib.load(nb_spam_model)
dictionary_filepath = open("pretrainedModel/vocabulary_model.pkl", 'rb')
vocabulary_to_load = joblib.load(dictionary_filepath)
loaded_vectorizer = CountVectorizer(ngram_range=(ngram_size, ngram_size), min_df=1, vocabulary=vocabulary_to_load)
loaded_vectorizer._validate_vocabulary()
df1 = pd.read_csv("uploads/svm.csv", encoding='latin-1')
our_list=df1['review']
sre=[]
for name in our_list:
inp1 = [name]
inp1 = loaded_vectorizer.transform(inp1).toarray()
my_prediction1 = clf.predict(inp1)
sre.append(my_prediction1)
print(my_prediction1)
fields = ['Sentiment']
with open("predictCsv/arjun11.csv", 'w' ,newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(fields)
writer.writerows(sre)
return render_template('resultofcsv.html', prediction1=my_prediction1)
"""
@app.route('/uploadcsv', methods=['GET', 'POST'])
def upload_file():
global places
places=[]
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
print('no file')
return redirect(request.url)
file = request.files['file']
print(file)
        # if the user does not select a file, the browser also
        # submits an empty part without a filename
if file.filename == '':
# print('no filename')
return redirect(request.url)
else:
filename = secure_filename(file.filename)
# print(filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# print("saved file successfully")
            # send file name as parameter to download
file_path = UPLOAD_FOLDER + filename
# print(file_path)
with open(file_path) as csv_file:
data = csv.reader(csv_file, delimiter=',')
places= []
for row in data:
if data.line_num == 1: fields = len(row)
if len(row) != fields:
print("Number of column is not match!")
if fields == 1:
places.append({
"city": row[0]
})
elif fields == 2:
places.append({
"city": row[0],
"attraction": row[1]
})
elif fields == 3:
places.append({
"city": row[0],
"attraction": row[1],
"attraction1": row[2]
})
else:
places.append({
"city": row[0],
"attraction": row[1],
"attraction1": row[2],
"attraction2": row[3]
})
col_name = request.form.get('check')
# print(col_name)
ngram_size = 1
nb_spam_model = open("pretrainedModel/NB_model.pkl", 'rb')
clf = joblib.load(nb_spam_model)
dictionary_filepath = open("pretrainedModel/vocabulary_model.pkl", 'rb')
vocabulary_to_load = joblib.load(dictionary_filepath)
loaded_vectorizer = CountVectorizer(ngram_range=(ngram_size, ngram_size), min_df=1, vocabulary=vocabulary_to_load)
loaded_vectorizer._validate_vocabulary()
df1 = pd.read_csv(file_path,skiprows=1,names=['col_1','col_2','col_3'] , encoding='latin-1')
our_list=df1['col_1']
sre=[]
for name in our_list:
inp1 = [name]
inp1 = loaded_vectorizer.transform(inp1).toarray()
my_prediction1 = clf.predict(inp1)
sre.append(my_prediction1)
# print(my_prediction1)
fields = ['Sentiment']
with open("predictCsv/Predict_sentiment.csv", 'w' ,newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(fields)
writer.writerows(sre)
df2=pd.read_csv(file_path)
            df3 = pd.read_csv("predictCsv/Predict_sentiment.csv")
#! /usr/bin/env python3
# importing modules
import time
import datetime
import requests
import json
import pandas as pd
import praw
import re
# set datetime string pattern
# limit_date below will need to match this format
# CHANGE VALUE HERE
date_pattern = '%Y-%m-%d %H:%M:%S' # == YYYY-MM-DD HH:MM:SS
# date limit for search
# expects a string following the format set above in date_pattern
# CHANGE VALUE HERE
limit_date = '2018-06-18 00:00:00'
# set interval used to split time and run queries.
# expects an int/float
# CHANGE VALUES HERE ON YOUR CONVENIENCE usually, 60 minutes bins are a very conservative number
minutes = 60 # this will cluster data in hour bins
# subreddit to be queried
# case insensitive
# CHANGE VALUE HERE
subreddit = 'SUBREDDIT'
# reddit client login
# CHANGE VALUES HERE
client_id='CLIENT_ID' # 14 CHAR
client_secret='CLIENT_SECRET' # 27 CHAR
user_agent='USER_AGENT' # app user agent name
user_name='USERNAME' # your login handle
password='PASSWORD' # your login password
# transform timestamp strings into Epoch Unix notation
# visit https://www.epochconverter.com for further documentation
# expects a string following the format set above in time_pattern
def get_epoch(date_time):
return int(time.mktime(time.strptime(date_time,date_pattern)))
# calculates interval in seconds.
def min_interval(minutes):
return minutes * 60
# transforms Epoch Unix into datetime objects
def get_date(submission):
time = submission
return datetime.datetime.fromtimestamp(time)
# gets string-formatted current time. Time zone: UTC/GMT
now = time.strftime(date_pattern, time.gmtime())
# creates list of Epoch Unix notation times
time_splits = list(range(get_epoch(limit_date),get_epoch(now),min_interval(minutes)))
# calculates the number of iterations
length = len(time_splits)
# set subset of useful columns from submissions data
sub_subset = ['author','created_utc','full_link','id','num_comments','permalink','retrieved_on','subreddit','subreddit_id','title','timestamp']
# URL setup
# visit https://github.com/pushshift/api for further documentation
# base for query
base_url = 'https://api.pushshift.io/reddit/search/submission/?'
# max number of search results per iteration [1-500]
size=500
# starts empty pandas DataFrame
sub_df = pd.DataFrame()
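# Sketch of how the pieces above could feed the query loop (the loop itself is not shown here;
# the 'subreddit', 'after', 'before' and 'size' parameters follow the pushshift docs linked
# above, but treat this as an illustrative assumption rather than the original implementation):
# for start, end in zip(time_splits[:-1], time_splits[1:]):
#     query_url = '{}subreddit={}&after={}&before={}&size={}'.format(base_url, subreddit, start, end, size)
#     batch = pd.DataFrame(json.loads(requests.get(query_url).text).get('data', []))
#     sub_df = pd.concat([sub_df, batch], ignore_index=True, sort=False)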
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 20:37:15 2021
@author: skrem
"""
import pandas as pd
import numpy as np
# import csv
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.preprocessing
from sklearn import metrics
import scipy.stats
import scipy.optimize
import seaborn as sns
import matplotlib.patheffects as path_effects
import os
import copy
scaler = sk.preprocessing.MinMaxScaler()
degree_sign = u'\N{DEGREE SIGN}'
"Get global params and pass them to locals"
import settings_init
import settings_transformations
from Avg_data_getter import Avg_data_getter
if settings_init.storage_location is not None:
file_location = settings_init.file_location
Mode = settings_init.Mode
On_len_s = settings_init.On_len_s
Off_len_s = settings_init.Off_len_s
Cycle_len_s = settings_init.Cycle_len_s
repeats = settings_init.repeats
Stim_width_um = settings_init.Stim_width_um
conds_list = settings_init.conds_list
response_avg_dur = settings_transformations.response_avg_dur
baseline_avg_dur = settings_transformations.baseline_avg_dur
indeces_per_s = settings_transformations.indeces_per_s
total_time = settings_transformations.total_time
vis_ang_list = settings_transformations.vis_ang_list
seconds_list = settings_transformations.seconds_list
avg_df = settings_transformations.avg_df
avg_array = settings_transformations.avg_array
ROI_number = settings_transformations.ROI_number
"Functions____________________________________________________________________"
def Get_event_data(roi = "All", event = "All", normalize = 0, plot = 0, data = file_location):
"""Returns a data for selected events specified (based on Mode), and computes
response and baseline average.
Hint: To select multiple ROIs for a single event or multiple events from a
    single ROI, assign the call to a variable, e.g. ROI_13_14_15_event_8 =
    Get_event_data((13, 14, 15), (8)). Selecting both multiple ROIs and
multiple events is unstable and will yield unexpected results.
Parameters
----------
    roi: Tuple or array
        ROIs from which data is extracted. Default loops through all ROIs.
        Script is written to be naive to whether the input is a tuple (one ROI)
        or an array (many ROIs).
    event: Tuple or array
        Events from which data is extracted. Default loops through all events.
        Naive to tuple (one event) or array (many events).
normalize : 0 or 1
Normalize data so range is from 0 to 1 (no/yes)
plot: 0 or 1
Plot sampled data
    data: path string, optional
        If given (as a path string), the script loads a new, external data file.
Returns
-------
ROI_responses, ROI_baselines, Average_response, Average_baseline
"""
# if data != file_location:
"""
TODO
- This is not the neatest solution... IF I am to do this, then I should
seriously change the label to NOT BE THE SAME AS GLOBAL PARAMS. What I am
doing currently is just a bit nasty...
"""
alt_data = Avg_data_getter(data)
avg_df = alt_data[0] #"""A test"""
avg_array = alt_data[1]
ROI_number = alt_data[2]
# label_list = alt_data[3]
#new improvements
if roi == "All":
roi = np.arange(0, ROI_number)
else:
roi = roi
if isinstance(roi, int) == True:
roi = np.array([roi])
# print("roi was int(), converted to numpy array")
#print("Warning: 'roi_select' takes tuple, but single int was given. Single int was converted to (1,) array.")
if event == "All":
event = np.arange(0, Mode)
else:
event = event
if isinstance(event, int) == True:
event = np.array([event])
# print("event was int(), converted to numpy array")
#print("Warning: 'event_select' takes tuple, but single int was given. Single int was converted to (1,) array.")
ROI_responses = np.empty((0,1))
ROI_baselines = np.empty((0,1))
if normalize == 1:
norm_avg_array = np.copy(avg_array) #create duplicate to avoid overwriting original imported data matrix
for i in roi:
"""
TODO
        - Fix the thing below... This is what's giving "IndexError: index 8 is out of bounds for axis 1 with size 8"
        = what happens is that as the loop starts, for some reason, it gets to a certain recording and the index is
out of bounds for the ROIs in the recording...
"""
curr_operation = scaler.fit_transform((norm_avg_array[:, i]).reshape(-1, 1)) #"""workaround"""
curr_operation = curr_operation.reshape(len(curr_operation))
norm_avg_array[:, i] = curr_operation
normalized_data_set = pd.DataFrame(data = norm_avg_array, columns = np.arange(0, ROI_number))
data_set = normalized_data_set
else:
data_set = pd.DataFrame.copy(avg_df)
for i in roi: #This script samples and extracts data at given intervals
for j in event:
#Get response values:
start_index_res = (On_len_s - response_avg_dur + (Cycle_len_s * j)) * indeces_per_s #set start position for current sampling
end_index_res = (On_len_s + (Cycle_len_s * j)) * indeces_per_s #end position for current sampling
curr_series_res = ((data_set[i].loc[start_index_res:end_index_res]))
curr_series_res = curr_series_res.to_numpy()
ROI_responses = np.append(curr_series_res, ROI_responses)
#Get baseline values:
start_index_bsl = (Cycle_len_s - baseline_avg_dur + (Cycle_len_s * j)) * indeces_per_s
end_index_bsl = (Cycle_len_s + (Cycle_len_s * j)) * indeces_per_s
curr_series_bsl = ((data_set[i].loc[start_index_bsl:end_index_bsl]))
curr_series_bsl = curr_series_bsl.to_numpy()
ROI_baselines = np.append(curr_series_bsl, ROI_baselines)
Average_response = np.average(ROI_responses)
Average_baseline = np.average(ROI_baselines)
if plot == 1:
if len(roi) == 1:
base_colors = mpl.cm.get_cmap('gist_rainbow')
color_list = base_colors(np.linspace(0, 1, ROI_number))
ROI_color = color_list[int(roi)]
else:
ROI_color = 'b'
fig, (ax1, ax2) = plt.subplots(1, 2, sharey = True, figsize = (10, 5))
plt.subplots_adjust(wspace = 0)
if isinstance(roi, int) == True:
plt.suptitle("Sampled activity for ROI {}, event {}".format(int(roi), int(event)))
else:
plt.suptitle("Sampled activity for ROIs {}, event {}".format((roi), (event)))
# plt.figure(0)
ax1.set_title("Response period")
if normalize == 0:
ax1.set_ylabel("Z-score (raw)")
if normalize == 1:
ax1.set_ylabel("Z-score (normalised)")
ax1.set_xlabel("Sample sequence")
ax1.plot(ROI_responses, c = ROI_color)
# plt.figure(1)
ax2.set_title("Baseline period")
# ax2.set_ylabel("Z-score")
ax2.set_xlabel("Sample sequence")
ax2.plot(ROI_baselines, c = ROI_color)
#plt.vlines(np.linspace(0, len(ROI_resp_array.flatten('F')), Mode), np.amin(ROI_resp_array), np.amax(ROI_resp_array), colors = 'k')
# print("Avg respone: {}, Avg baseline: {}".format(Average_response, Average_baseline))
return ROI_responses, ROI_baselines, Average_response, Average_baseline
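# Usage sketch (hypothetical ROI/event indices -- adjust to the recording at hand):
# resp, bsl, avg_resp, avg_bsl = Get_event_data(roi=(3, 4), event=2, normalize=1, plot=1)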
def Get_interval_data(roi, interval_start_s, interval_end_s, normalize = 0, plot = 0):
"""Returns data from given ROI within specified time interval (s)
Parameters
-------------
roi: int
Which ROI to sample data from. Only one can be chosen at a time.
interval_start_s: int
Start of sampling interval (in seconds)
interval_end_s: int
End of sampling interval (in seconds)
normalize : 0 or 1
Normalize data so range is from 0 to 1 (no/yes)
plot: 0 or 1
Plot sampled data
Returns
-------
interval_data, interval_data_with_s
"""
if normalize == 1:
norm_avg_array = np.copy(avg_array) #create duplicate to avoid overwriting original imported data matrix
curr_operation = scaler.fit_transform((norm_avg_array[:,roi]).reshape(-1, 1)) #"""workaround"""
curr_operation = curr_operation.reshape(len(curr_operation))
norm_avg_array[:, roi] = curr_operation
normalized_data_set = pd.DataFrame(data = norm_avg_array, columns = np.arange(0, ROI_number)) #np.arange(0, ROI_number)
data_set = normalized_data_set
else:
data_set = pd.DataFrame.copy(avg_df)
interval_data = np.empty((0,1))
start_index = interval_start_s * indeces_per_s #set start position for current sampling
end_index = interval_end_s * indeces_per_s #end position for current sampling
curr_series_res = ((data_set[roi].loc[start_index:end_index]))
curr_series_res = curr_series_res.to_numpy()
interval_data = np.append(curr_series_res, interval_data)
if interval_end_s > total_time:
time_in_s = np.linspace(interval_start_s, total_time, len(interval_data))
else:
time_in_s = np.linspace(interval_start_s, interval_end_s, len(interval_data))
interval_data_with_s = np.column_stack((interval_data, time_in_s))
if plot == 1:
if isinstance(roi, int) is True:
base_colors = mpl.cm.get_cmap('gist_rainbow')
color_list = base_colors(np.linspace(0, 1, ROI_number))
ROI_color = color_list[roi]
else:
ROI_color = 'b'
plt.figure(0, dpi = 800)
if normalize == 0:
plt.ylabel("Z-score (raw)")
if normalize == 1:
plt.ylabel("Z-score (normalised)")
plt.title("Sampled interval data from ROI{}".format(roi))
x_axis = time_in_s
plt.plot(x_axis, interval_data, c=ROI_color)
plt.xlabel("Time (s)")
for m in range(Mode):
plt.axvspan((m * Cycle_len_s), ((m * Cycle_len_s) + On_len_s),
color='r', alpha=0.25, lw=0)
if interval_end_s > total_time:
plt.xlim([interval_start_s, total_time])
else:
plt.xlim([interval_start_s, interval_end_s])
return interval_data, interval_data_with_s
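# Usage sketch (hypothetical ROI and interval): sample ROI 2 between 10 s and 30 s, normalised:
# interval_vals, interval_vals_with_s = Get_interval_data(2, 10, 30, normalize=1, plot=1)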
def Plot_activity(ROIs = "All", shade = 1, **kwargs):
"""Plot activity of all or specified ROIs"""
if ROIs == "All":
to_plot = np.arange(0, ROI_number)
else:
to_plot = np.array(ROIs)
#Colormap
base_colors = mpl.cm.get_cmap('gist_rainbow') #hsv(x) for x in range(ROI_number)] <-- legacy solution
color_list = base_colors(np.linspace(0, 1, ROI_number))
#Calculate time interval for x-axis
time_in_s = np.linspace(0, total_time, len(avg_df))
#Build each individual ROI plot
# if ROIs == "All":
fig, ax1 = plt.subplots(len(to_plot), 1, sharex = 'col', sharey = False, dpi = 1200, figsize=(10, 15))
# else:
# fig, ax1 = plt.subplots(len(to_plot), 1, sharex = 'col', sharey = False, dpi = 800, figsize=(10, 15))
for v, i in enumerate(to_plot):
w = v+1
ax1[v] = plt.subplot(len(to_plot), 1, w)
ax1[v].plot(time_in_s, avg_df[i], color = color_list[i], linewidth=1.5)
sns.despine(left = True, right = True, bottom = True)
ax1[v].get_yaxis().set_visible(False)
ax1[v].set_title("ROI{}".format(i), x=-0.01, y=.5, size = 10)
if shade == 1:
for m in range(Mode):
ax1[v].axvspan(
(m * Cycle_len_s), ((m * Cycle_len_s) + On_len_s),
color = '#ffe0f9', lw = 0)#, alpha = 0)
# plt.setp(ax1[i-1].get_xticklabels(), visible=False) #This is a work around. Hides axis
#for every ax1 except last one, as #share-axis did not function properly.
plt.subplots_adjust(hspace = 0)
#Frame for adding titles and such
fig.add_subplot(111, frameon = False)
plt.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)
plt.xlabel("Time (s)")
plt.title("Average ROI activity ({} trials)".format(repeats))
# ax2.spines["top"].set_visible(True)
# ax2.spines["bottom"].set_visible(False)
# ax2.spines["left"].set_visible(False)
# ax2.spines["right"].set_visible(False)
# ax2.axis("off")
if 'saveas' in kwargs:
# plt.figure(dpi = 2000)
plt.savefig(r'C://Users//skrem//OneDrive//Universitet//MSc//Experimental project//Figures//Python generated//{}'.format(kwargs['saveas']), dpi = 2000, bbox_inches='tight')
plt.figure(2, dpi=800)
ROI_overlap, bx = plt.subplots(1, 1, figsize=(15, 10))
bx.set_title("All ROI activity")
plt.locator_params(axis = 'x', tight = None, nbins = 30)
for i in to_plot:
bx.plot(seconds_list, avg_df[i], color = color_list[i], linewidth=0.75)
bx.set_xlabel("Time (s)")
bx.set_ylabel("Z-score")
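# Usage sketch: Plot_activity() draws every ROI; Plot_activity(ROIs=[0, 3, 7], shade=1) draws a
# subset with the stimulus-on periods shaded (the ROI indices here are hypothetical).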
def Get_RF_matrix(roi = 'All', normalize = 0, data = file_location):
"""Gives the receptive field as a matrix by computing the difference
between the response and baseline for each event, for specified ROIs."""
# avg_bsln = Get_event_data()[3]
if normalize == 0:
norm = 0
if normalize == 1:
norm = 1
x_axis = np.empty(0)
y_axis = np.empty(0)
# for i in reversed(range(int(Mode/2))):
for i in reversed(range(int(Mode/2))):
# x_axis = np.append(Get_event_data(roi, i)[3]-Get_event_data(roi, i)[2] - avg_bsln, x_axis)
x_axis = np.append(Get_event_data(roi, i, norm, data = data)[3]-Get_event_data(roi, i, norm, data = data)[2], x_axis)
# a = np.flip(a)
for j in reversed(range(int(Mode/2), Mode)):
# for j in reversed(range(int(Mode/2), Mode)):
# y_axis = np.append(Get_event_data(roi, j)[3]-Get_event_data(roi, j)[2] - avg_bsln, y_axis)
y_axis = np.append(Get_event_data(roi, j, norm, data = data)[3]-Get_event_data(roi, j, norm, data = data)[2], y_axis)
# b = np.flip(b)
RF_matrix = x_axis.reshape(int(Mode/2), 1) @ y_axis.reshape(1, int(Mode/2))
RF_matrix = np.rot90(RF_matrix, 1)
return RF_matrix, x_axis, y_axis
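# Usage sketch (hypothetical ROI index): the RF matrix is the outer product of the X and Y
# profiles, so it has shape (Mode/2, Mode/2):
# rf, x_prof, y_prof = Get_RF_matrix(roi=5, normalize=1)
# assert rf.shape == (int(Mode/2), int(Mode/2))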
def Plot_RF(roi = 'All', normalize = 0, data = file_location, **kwargs):
if normalize == 0:
RF_matrix = Get_RF_matrix(roi, 0, data = data)[0]
if normalize == 1:
RF_matrix = Get_RF_matrix(roi, 1, data = data)[0]
if 'interpolation' in kwargs:
interpol = kwargs['interpolation']
else:
interpol = None
vis_ang_list_rounded = np.round(vis_ang_list, 1) #axis starts at 0
# vis_ang_list_rounded = np.round(np.absolute(vis_ang_list_alt), 1) #axis centered on 0
fig, ax1 = plt.subplots(1,1, figsize = (10, 10))
RF_plot = ax1.imshow(RF_matrix, cmap = 'bone', interpolation = interpol)
ax1.set_ylabel("Visual angle (°)", labelpad = 15)
ax1.set_yticks(np.arange(-.5, Mode/2))
ax1.set_yticklabels(np.flip(vis_ang_list_rounded))
ax1.yaxis.set_label_position("right")
ax1.yaxis.tick_right()
ax1.set_xlabel("Visual angle (°)", labelpad = 15)
ax1.set_xticks(np.arange(-.5, (Mode/2)))
ax1.set_xticklabels((vis_ang_list_rounded))
ax1.xaxis.set_label_position("top")
ax1.xaxis.tick_top()
ax2 = ax1.secondary_xaxis('bottom')
ax2.set_xticks(np.arange(0, Mode/2))
ax2.set_xticklabels(np.arange(1, round((Mode/2)) + 1))
ax2.set_xlabel("Bar location", labelpad = 15)
ax2 = ax1.secondary_yaxis('left')
ax2.set_yticks(np.arange(0, Mode/2))
ax2.set_yticklabels(reversed(np.arange(1, round((Mode/2)) + 1)))
ax2.set_ylabel("Bar location", labelpad = 15)
plt.grid(True, which = 'major', color = "grey")
plt.colorbar(RF_plot, fraction = 0.04 ,pad = .175, label = "Z-score difference (baseline avg. - response avg.)")
if roi == 'All':
plt.suptitle("Computed receptive field for all sampled ROIs", y = .90)
if "title" in kwargs:
plt.suptitle(kwargs["title"], y = .90)
else:
plt.suptitle("Computed receptive field for ROI {}".format(roi), y = .90)
if 'saveas' in kwargs:
plt.savefig(r'C://Users//skrem//OneDrive//Universitet//MSc//Experimental project//Figures//Python generated//{}'.format(kwargs['saveas']), dpi = 2000, bbox_inches='tight')
"""Consider this 3D RF plot too! https://stackoverflow.com/questions/44895117/colormap-for-3d-bar-plot-in-matplotlib-applied-to-every-bar
or https://www.geeksforgeeks.org/3d-surface-plotting-in-python-using-matplotlib/ or https://stackoverflow.com/questions/38698277/plot-normal-distribution-in-3d """
def gaus(x, a, b, c):
    # Gaussian with amplitude a, centre b and width (standard deviation) c
return a * np.exp(-(x-b)**2/(2*c**2))
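# Self-contained sketch of fitting gaus() to synthetic data (illustration only, not part of the
# pipeline): recover amplitude a, centre b and width c from noisy samples.
# _x = np.linspace(0, 10, 50)
# _y = gaus(_x, 2.0, 5.0, 1.5) + np.random.normal(0, 0.05, _x.size)
# _popt, _pcov = scipy.optimize.curve_fit(gaus, _x, _y, p0=(2.0, 5.0, 1.5))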
def find_near(input_array, target):
"""Return nearest value to specified target and its index in array"""
arr = np.asarray(input_array)
x = target
difference_array = np.abs(arr-x)
index = difference_array.argmin()
nearest = arr[index]
nearest_loc = index
return nearest, nearest_loc
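# Usage sketch: find_near(vis_ang_list, 10) returns the visual angle closest to 10 degrees
# together with its index in vis_ang_list.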
def RF_profile(roi = 'All', normalize = 0, plot = 1, curvefit = 1, data = file_location, test_fit = True, title = 0, **kwargs):
"""Returns a barchart of X and Y response profiles for specified ROI. Differs
from RF_matrix_slice() in that RF_profile retrieves plot BEFORE matrix
multiplication and subsequent matrix slicing --> E.g. RF_profile draws on
raw"""
if normalize == 0:
norm = 0
if normalize == 1:
norm = 1
if 'example_data' in kwargs:
x_axis = kwargs['example_data'][0]
y_axis = kwargs['example_data'][1]
else:
x_axis = np.empty(0)
y_axis = np.empty(0)
for i in reversed(range(int(Mode/2))):
x_axis = np.append(Get_event_data(roi, i, norm, data = data)[3]-Get_event_data(roi, i, norm, data = data)[2], x_axis)
for j in (range(int(Mode/2), Mode)):
y_axis = np.append(Get_event_data(roi, j, norm, data = data)[3]-Get_event_data(roi, j, norm, data = data)[2], y_axis)
if plot == 1:
plt.figure(dpi = 800)
# plt.subplot(2, 1, 1)
plt.bar(np.arange(0, Mode/2), x_axis.reshape(int(Mode/2),), width=1, label = "X axis scores")
plt.bar(np.arange(0, Mode/2), y_axis.reshape(int(Mode/2),), width=.90, label = "Y axis scores")
axx = plt.gca()
axy = axx.secondary_xaxis('top')
if title == 1:
plt.title("ROI RF response profile (X and Y axes)")
axx.set_xlabel("Visual angle (°)")
axx.set_ylabel("Response (Z-score difference)")
plt.xticks(np.arange(-.5, (Mode/2)))
axx.set_xticklabels(np.round(vis_ang_list, 1))
axy.set_xticks(np.arange(0, Mode/2))
axy.set_xticklabels(np.arange(0, round((Mode/2))))
axy.set_xlabel("Bar position")
handles, labels = axx.get_legend_handles_labels()
plt.legend(reversed(handles), reversed(labels))
if curvefit == 1: #for plotting purposes
xdata = np.arange(0, int(Mode/2))
x_ydata = x_axis.reshape(int(Mode/2),)
y_ydata = y_axis.reshape(int(Mode/2),)
#Get curve params
popt_x, pcov_x = scipy.optimize.curve_fit(gaus, xdata, x_ydata, maxfev=2500, p0 = np.array((max(x_ydata), np.argmax(x_ydata),1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(x_ydata), np.inf, np.inf)))
popt_y, pcov_y = scipy.optimize.curve_fit(gaus, xdata, y_ydata, maxfev=2500, p0 = np.array((max(y_ydata), np.argmax(y_ydata),1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(y_ydata), np.inf, np.inf)))
#Plot curve
resolution = 1000
x=np.linspace(0, Mode/2, resolution)
yx = gaus(x, *popt_x)
yy = gaus(x, *popt_y)
if test_fit == True:
#Compute R^2 --> https://stackoverflow.com/questions/19189362/getting-the-r-squared-value-using-curve-fit
x_residuals = x_ydata - gaus(xdata, *popt_x) #Get residuals
x_ss_res = np.sum(x_residuals**2) #Calculate residual sum of squares
x_ss_tot = np.sum((x_ydata - np.mean(x_ydata))**2) #Total sum of squares
x_r_squared = 1 - (x_ss_res / x_ss_tot) #R^2 value
x_r = np.sqrt(x_r_squared)
y_residuals = y_ydata - gaus(xdata, *popt_y)
y_ss_res = np.sum(y_residuals**2)
y_ss_tot = np.sum((y_ydata - np.mean(y_ydata))**2)
y_r_squared = 1 - (y_ss_res / y_ss_tot)
y_r = np.sqrt(y_r_squared)
#Compute Adjusted R^2 --> https://www.statisticshowto.com/probability-and-statistics/statistics-definitions/adjusted-r2/
regs = len(np.array(gaus.__code__.co_varnames))-1 #Number of regressors (variables in model - constant)
x_n = len(x_ydata) #n number of points in data sample (of curve or data?)
x_r_squared_adj = 1 - ((1-x_r_squared)*(x_n - 1))/(x_n-regs-1)
y_n = x_n
y_r_squared_adj = 1 - ((1-y_r_squared)*(y_n - 1))/(y_n-regs-1)
if plot == 1:
#Put R^2 and Chi^2 values into a little table
table_content = np.array([["R", np.round(x_r, 2), np.round(y_r, 2)], ["R\u00b2", np.round(x_r_squared, 2), np.round(y_r_squared, 2)],["R\u2090\u00b2", np.round(x_r_squared_adj, 2), np.round(y_r_squared_adj, 2)]]) #["X\u00b2", np.round(x_chi_p, 2), np.round(y_chi_p, 2)]]) #placeholder
collabel = ('Fit', 'X', 'Y')
The_table = plt.table(cellText=table_content ,colLabels=collabel, colWidths = [0.05]*3, loc = 'bottom left', bbox = (-.1,-.4,.25,.25))
The_table.scale(1 * 1.5, 1)
if plot == 1:
x_curve_eq = r"$\ f(x) = %.2f e ^ {-\frac{(x - %.2f)^2}{(%.2f)^2}} "\
"$" % (popt_x[0], popt_x[1], 2*popt_x[2])
y_curve_eq = r"$\ f(y) = %.2f e ^ {-\frac{(y - %.2f)^2}{(%.2f)^2}} "\
"$" % (popt_y[0], popt_y[1], 2*popt_y[2])
plt.plot(x, yx, c='b', label="{}".format(x_curve_eq),
path_effects=[path_effects.Stroke(linewidth=4,
foreground = 'black'), path_effects.Normal()])
plt.plot(x, yy, c = 'orange', label = y_curve_eq,
path_effects=[path_effects.Stroke(linewidth = 4,
foreground = 'black'), path_effects.Normal()])
plt.xticks(np.arange(-.5, (Mode/2)))
handles, labels = axx.get_legend_handles_labels()
plt.legend(reversed(handles), (reversed(labels)))
axx.set_xticklabels(np.round(vis_ang_list, 1))
if plot == 1:
plt.show()
if curvefit == 0:
return x_axis, y_axis
if curvefit == 1 and test_fit == True:
return x_axis, y_axis, x_r_squared, y_r_squared
else:
return x_axis, y_axis
def RF_matrix_slice (roi = 'All', normalize = 0, plot = 1, curvefit = 1, data = file_location):
if normalize == 0:
RF_matrix = Get_RF_matrix(roi, 0, data)[0]
if normalize == 1:
RF_matrix = Get_RF_matrix(roi, 1, data)[0]
# RF_peak = np.amax(RF_matrix)
RF_peak_loc = np.where(RF_matrix == np.amax(RF_matrix))
y_axis_vals = RF_matrix[:, RF_peak_loc[1]]
x_axis_vals = RF_matrix[RF_peak_loc[0]]
if plot == 1:
plt.figure(dpi = 800)
plt.bar(np.arange(0, Mode/2), x_axis_vals.reshape(int(Mode/2),), width=1, label = "X axis scores")
plt.bar(np.arange(0, Mode/2), y_axis_vals.reshape(int(Mode/2),), width=.90, label = "Y axis scores")
axx = plt.gca()
axy = axx.secondary_xaxis('top')
plt.title("Slice through centre of RF matrix (X and Y axes)")
axx.set_xticks(np.arange(0, Mode/2))
axx.set_xlabel("Visual angle (°)")
axx.set_ylabel("Response (Z-score difference)")
axy.set_xticks(np.arange(0, Mode/2))
axy.set_xticklabels(np.arange(0, round((Mode/2))))
axy.set_xlabel("Bar position")
handles, labels = axx.get_legend_handles_labels()
plt.legend(reversed(handles), reversed(labels))
if curvefit == 1:
xdata = np.arange(0, int(Mode/2))
x_ydata = x_axis_vals.reshape(int(Mode/2),)
y_ydata = y_axis_vals.reshape(int(Mode/2),)
# popt_x, pcov_x = scipy.optimize.curve_fit(gaus, np.arange(0, int(Mode/2)), x_axis_vals.reshape(int(Mode/2),), maxfev=2500)
# popt_y, pcov_y = scipy.optimize.curve_fit(gaus, np.arange(0, int(Mode/2)), y_axis_vals.reshape(int(Mode/2),), maxfev=2500)
popt_x, pcov_x = scipy.optimize.curve_fit(gaus, xdata, x_ydata, maxfev=2500, p0 = np.array((max(x_ydata), np.argmax(x_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(x_ydata), np.inf, np.inf)))
popt_y, pcov_y = scipy.optimize.curve_fit(gaus, xdata, y_ydata, maxfev=2500, p0 = np.array((max(y_ydata), np.argmax(y_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(y_ydata), np.inf, np.inf)))
x=np.linspace(0, Mode/2, 1000)
yx = gaus(x, *popt_x)
yy = gaus(x, *popt_y)
x_curve_eq = r"$\ f(x) = %.2f e ^ {-\frac{(x - %.2f)^2}{(%.2f)^2}} "\
"$" % (popt_x[0], popt_x[1], 2*popt_x[2])
y_curve_eq = r"$\ f(y) = %.2f e ^ {-\frac{(y - %.2f)^2}{(%.2f)^2}} "\
"$" % (popt_y[0], popt_y[1], 2*popt_y[2])
plt.plot(x, yx, c='b', label="{}".format(x_curve_eq),
path_effects=[path_effects.Stroke(linewidth=4,
foreground = 'black'), path_effects.Normal()])
plt.plot(x, yy, c = 'orange', label = y_curve_eq,
path_effects=[path_effects.Stroke(linewidth = 4,
foreground = 'black'), path_effects.Normal()])
plt.xticks(np.arange(-.5, (Mode/2)))
handles, labels = axx.get_legend_handles_labels()
plt.legend(reversed(handles), reversed(labels))
axx.set_xticklabels(np.round(vis_ang_list, 1))
plt.show()
return x_axis_vals, y_axis_vals
def Compute_RF_size(roi = 'All', normalize = 0, plot = 0, data = file_location, test_fit = True, **kwargs):
"""https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2269911/"""
if 'example_data' in kwargs:
x_vals = kwargs['example_data'][0]
y_vals = kwargs['example_data'][1]
else:
if normalize == 0:
x_vals, y_vals = RF_profile(roi, 0, 0, data = data)[:2]
if normalize == 1:
x_vals, y_vals = RF_profile(roi, 1, 0, data = data)[:2]
xdata = np.arange(0, int(Mode/2))
x_ydata = x_vals.reshape(int(Mode/2),)
y_ydata = y_vals.reshape(int(Mode/2),)
try:
popt_x, pcov_x = scipy.optimize.curve_fit(gaus, xdata, x_ydata, maxfev=2500, p0 = np.array((max(x_ydata), np.argmax(x_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(x_ydata), np.inf, np.inf)))
popt_y, pcov_y = scipy.optimize.curve_fit(gaus, xdata, y_ydata, maxfev=2500, p0 = np.array((max(y_ydata), np.argmax(y_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(y_ydata), np.inf, np.inf)))
except Exception:
popt_x = pcov_x = popt_y = pcov_y = 0
Nofit = 'No fit'
print ("scipy.optimize.curve_fit maxfev reached, returned (None, None) dtype = object")
return Nofit, Nofit
resolution = 10000 #how many points on curve, more is better but computationally slower
index_to_visang = vis_ang_list[-1]/resolution
x=np.linspace(0, Mode/2, resolution)
yx = gaus(x, *popt_x)
yy = gaus(x, *popt_y)
criteria = 0.005
yx_peak = np.where(yx == np.amax(yx))[0][0]
if yx_peak == 0 or yx_peak == resolution:
yx_half_width = "Peak obscured"#None #return None if value falls outside range of data
yx_curve_indeces = np.where(yx > criteria)
yx_left_index = yx_curve_indeces[0][0]
yx_right_index = yx_curve_indeces[0][-1]
yx_half_width = ((yx_right_index - yx_left_index) * index_to_visang) / 2
if yx_left_index == 0 or yx_right_index == resolution:
yx_half_width = "Half-width obscured"
yy_peak = np.where(yy == np.amax(yy))[0][0]
if yy_peak == 0 or yy_peak == resolution:
yy_half_width = "Peak obscured"#None #return None if value falls outside range of data
yy_curve_indeces = np.where(yy > criteria)
yy_left_index = yy_curve_indeces[0][0]
yy_right_index = yy_curve_indeces[0][-1]
yy_half_width = ((yy_right_index - yy_left_index) * index_to_visang) / 2
if yy_left_index == 0: #or yy_right_index == resolution:
yy_half_width = "Half-width obscured"
if test_fit == True:
x_axis = np.empty(0)
y_axis = np.empty(0)
for i in reversed(range(int(Mode/2))):
x_axis = np.append(Get_event_data(roi, i, normalize, data = data)[3]-Get_event_data(roi, i, normalize, data = data)[2], x_axis)
for j in (range(int(Mode/2), Mode)):
y_axis = np.append(Get_event_data(roi, j, normalize, data = data)[3]-Get_event_data(roi, j, normalize, data = data)[2], y_axis)
xdata = np.arange(0, int(Mode/2))
x_ydata = x_axis.reshape(int(Mode/2),)
y_ydata = y_axis.reshape(int(Mode/2),)
x_y = x_ydata
y_y = y_ydata
X = gaus(xdata, *popt_x)
x_pearsons_r = scipy.stats.pearsonr(x_y, gaus(xdata, *popt_x))
x_r_sqrd = metrics.r2_score(x_y, gaus(xdata, *popt_x))
x_r_squared_adjusted = 1 - ((1 - x_r_sqrd)*(len(x_y) - 1)) / ((len(x_y) - len(popt_x) - 1))
spearmans_for_x = scipy.stats.spearmanr(x_y, gaus(xdata, *popt_x))
y_pearsons_r = scipy.stats.pearsonr(y_y, gaus(xdata, *popt_y))
y_r_sqrd = sk.metrics.r2_score(y_y, gaus(xdata, *popt_y))
y_r_squared_adjusted = 1 - ((1 - y_r_sqrd)*(len(y_y) - 1)) / ((len(y_y) - len(popt_y) - 1))
spearmans_for_y = scipy.stats.spearmanr(y_y, gaus(xdata, *popt_y))
if plot == 1:
plt.plot(np.linspace(0, vis_ang_list[-1], resolution), yx)
plt.plot(np.linspace(0, vis_ang_list[-1], resolution), yy)
if isinstance(yx_half_width, str) == False:
plt.hlines(yx[int((yx_left_index + yx_peak) / 2)], yx_left_index * index_to_visang + yx_half_width/2, yx_right_index * index_to_visang - yx_half_width/2)
plt.hlines(yx[int((yx_left_index + yx_peak) / 2)], yx_left_index * index_to_visang, yx_right_index * index_to_visang, linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_left_index*index_to_visang, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_left_index * index_to_visang + yx_half_width/2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)])
plt.vlines(x = yx_right_index*index_to_visang, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_right_index * index_to_visang - yx_half_width/2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)])
if isinstance(yy_half_width, str) == False:
plt.hlines(yy[int((yy_left_index + yy_peak) / 2)], yy_left_index * index_to_visang + yy_half_width/2, yy_right_index * index_to_visang - yy_half_width/2, colors = '#FF8317')
plt.hlines(yy[int((yy_left_index + yy_peak) / 2)], yy_left_index * index_to_visang, yy_right_index * index_to_visang, linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_left_index*index_to_visang, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_left_index * index_to_visang + yy_half_width/2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], colors = '#FF8317')
plt.vlines(x = yy_right_index*index_to_visang, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_right_index * index_to_visang - yy_half_width/2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], colors = '#FF8317')
plt.axvline(x = yx_peak*index_to_visang, c = 'g', linestyle = (0, (5, 10)))
plt.axvline(x = yy_peak*index_to_visang, c = 'g', linestyle = (0, (5, 10)))
# plt.xlim(0, 75)
plt.xlabel("Visual angle (°)")
print("Pearsons X: {}, {}".format(x_pearsons_r, y_pearsons_r))
print("R2: {} {}".format(x_r_sqrd, y_r_sqrd))
print("R2adj {}, {}".format(x_r_squared_adjusted, y_r_squared_adjusted))
print("Spearman R: {}, {}".format(spearmans_for_x, spearmans_for_y))
if 'title' in kwargs:
plt.title(kwargs["title"])
plt.show()
# return yx_RF_size, yy_RF_size
return yx_half_width, yy_half_width
def Model_RF_size(roi = 'All', normalize = 0, plot = 0, data = file_location, test_fit = True):
"""https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2269911/"""
if normalize == 0:
x_vals, y_vals = RF_profile(roi, 0, 0, 0, data = data)[:2]
if normalize == 1:
x_vals, y_vals = RF_profile(roi, 1, 0, 0, data = data)[:2]
xdata = np.arange(0, int(Mode/2))
x_ydata = x_vals.reshape(int(Mode/2),)
y_ydata = y_vals.reshape(int(Mode/2),)
try:
popt_x, pcov_x = scipy.optimize.curve_fit(gaus, xdata, x_ydata, maxfev=2500, p0 = np.array((max(x_ydata), np.argmax(x_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(x_ydata), np.inf, np.inf)))
popt_y, pcov_y = scipy.optimize.curve_fit(gaus, xdata, y_ydata, maxfev=2500, p0 = np.array((max(y_ydata), np.argmax(y_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(y_ydata), np.inf, np.inf)))
except Exception:
popt_x = pcov_x = popt_y = pcov_y = 0
Nofit = 'No fit'
print ("scipy.optimize.curve_fit maxfev reached, returned (None, None) dtype = object")
return Nofit, Nofit
resolution = 10000 #how many points on curve, more is better but computationally slower
x=np.linspace(-Mode, Mode, resolution)
index_to_visang = vis_ang_list[-1]*4/resolution #multiply by 2 because
yx = gaus(x, *popt_x)
yy = gaus(x, *popt_y)
buffer_estimate = .05 #If the first index is within x percentage of half-height, count it as half-height
criteria = 0.005
yx_peak = np.where(yx == np.amax(yx))[0][0]
if yx_peak == 0 or yx_peak == resolution:
yx_half_width = "Peak obscured"#None #return None if value falls outside range of data
yx_curve_indeces = np.where(yx > criteria)
yx_left_index = yx_curve_indeces[0][0]
yx_right_index = yx_curve_indeces[0][-1]
yx_half_width = ((yx_right_index - yx_left_index) * index_to_visang) / 2
if yx_left_index == 0 or yx_right_index == resolution:
yx_half_width = "Half-width obscured"
yy_peak = np.where(yy == np.amax(yy))[0][0]
if yy_peak == 0 or yy_peak == resolution:
yy_half_width = "Peak obscured"#None #return None if value falls outside range of data
yy_curve_indeces = np.where(yy > criteria)
yy_left_index = yy_curve_indeces[0][0]
yy_right_index = yy_curve_indeces[0][-1]
yy_half_width = ((yy_right_index - yy_left_index) * index_to_visang) / 2
if yy_left_index == 0: #or yy_right_index == resolution:
yy_half_width = "Half-width obscured"
if test_fit == True:
x_axis = np.empty(0)
y_axis = np.empty(0)
for i in reversed(range(int(Mode/2))):
x_axis = np.append(Get_event_data(roi, i, normalize, data = data)[3]-Get_event_data(roi, i, normalize, data = data)[2], x_axis)
for j in (range(int(Mode/2), Mode)):
y_axis = np.append(Get_event_data(roi, j, normalize, data = data)[3]-Get_event_data(roi, j, normalize, data = data)[2], y_axis)
xdata = np.arange(0, int(Mode/2))
x_ydata = x_axis.reshape(int(Mode/2),)
y_ydata = y_axis.reshape(int(Mode/2),)
spearmans_for_x = scipy.stats.spearmanr(x_ydata, gaus(xdata, *popt_x))
x_r = spearmans_for_x[0]
spearmans_for_y = scipy.stats.spearmanr(y_ydata, gaus(xdata, *popt_y))
y_r = spearmans_for_y[0]
if plot == 1:
plt.plot((np.linspace(-vis_ang_list[-1]*2, vis_ang_list[-1]*2, resolution)), yx)
plt.plot((np.linspace(-vis_ang_list[-1]*2, vis_ang_list[-1]*2, resolution)), yy)
if isinstance(yx_half_width, str) == False:
plt.hlines(yx[int((yx_left_index + yx_peak) / 2)], yx_left_index * index_to_visang - vis_ang_list[-1]*2 + yx_half_width/2, yx_right_index * index_to_visang - vis_ang_list[-1]*2 - yx_half_width/2)
plt.hlines(yx[int((yx_left_index + yx_peak) / 2)], yx_left_index * index_to_visang - vis_ang_list[-1]*2, yx_right_index * index_to_visang - vis_ang_list[-1]*2, linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_left_index*index_to_visang - vis_ang_list[-1]*2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_left_index * index_to_visang - vis_ang_list[-1]*2 + yx_half_width/2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)])
plt.vlines(x = yx_right_index*index_to_visang - vis_ang_list[-1]*2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_right_index * index_to_visang - vis_ang_list[-1]*2 - yx_half_width/2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)])
if isinstance(yy_half_width, str) == False:
plt.hlines(yy[int((yy_left_index + yy_peak) / 2)], yy_left_index * index_to_visang - vis_ang_list[-1]*2 + yy_half_width/2, yy_right_index * index_to_visang - vis_ang_list[-1]*2 - yy_half_width/2, colors = '#FF8317')
plt.hlines(yy[int((yy_left_index + yy_peak) / 2)], yy_left_index * index_to_visang - vis_ang_list[-1]*2, yy_right_index * index_to_visang - vis_ang_list[-1]*2, linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_left_index*index_to_visang - vis_ang_list[-1]*2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_left_index * index_to_visang - vis_ang_list[-1]*2 + yy_half_width/2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], colors = '#FF8317')
plt.vlines(x = yy_right_index*index_to_visang - vis_ang_list[-1]*2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_right_index * index_to_visang - vis_ang_list[-1]*2 - yy_half_width/2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], colors = '#FF8317')
plt.axvline(x = yx_peak*index_to_visang - vis_ang_list[-1]*2, c = 'g', linestyle = (0, (5, 10)))
plt.axvline(x = yy_peak*index_to_visang - vis_ang_list[-1]*2, c = 'g', linestyle = (0, (5, 10)))
plt.xlabel("Visual angle (°)")
plt.show()
# return yx_RF_size, yy_RF_size, x_r, y_r #, x_pearsons_r, y_pearsons_r
return yx_half_width, yy_half_width, x_r, y_r
def RF_estimates_list(function, stimfolder, resolutionfolder, rootfolder = 'D:\\Dissertation files\\Further analysis'):
"""Returns a list of RF estimates based on script Compute_RF_size, for each
condition, for each file, for each ROI."""
# stim = rootfolder + '\\' + stimfolder
res = rootfolder + '\\' + stimfolder + '\\' + resolutionfolder
conds = os.listdir(res)
# All_estimates = []
Compare_estimates = []
Total_ROIs = 0
Total_R_eligible = 0
for j in conds: #Conditions to loop through
print(j)
txt_files = []
dir_files = os.listdir(res + '\\' + j)
intermediate_list = []
for file in dir_files: #Build list of files to loop through
if file.endswith('.txt') is True:
txt_files.append(file)
for file in txt_files: #Then loop through those files
print(file)
file_dir = res + '\\' + j + '\\' + file
curr_data = Avg_data_getter(file_dir)
if file == txt_files[len(txt_files)-1]:
Compare_estimates.append(intermediate_list)
for roi in curr_data[0].columns:
estimate = function(roi, normalize = 1, plot = 0, data = file_dir)
print(r"Currently on ROI#:{} RF estimate: {} ".format(Total_ROIs, estimate[:2]), flush = True, end = '')
Total_ROIs += 1
# if isinstance(estimate[2], float) and isinstance(estimate[3], float):
if len(estimate) > 2:
if estimate[2] >= 0.5 and estimate[3] >= 0.5:
intermediate_list.append(estimate[:2])
Total_R_eligible += 1
print("R values: {}, {}".format(estimate[2], estimate[3]))
# else:
else:
print("R values: {}, {} REJECTED!".format(estimate[2], estimate[3]))
if roi == len(curr_data[0].columns)-1:
print(" - Number of ROIs in file = {}".format(len(curr_data[0].columns)))
print(" - Total number of ROIS = {}".format(Total_ROIs))
print(" - N ROIs with sufficient R = {}".format(Total_R_eligible))
Compare_estimates.append(conds)
return Compare_estimates
def Discard_junk_data(data_list, conditions = 4):
"""If index contains a string or the value 0, discard those indexes and
return a "cleaned" list. """
data_copy = copy.deepcopy(data_list)
conds = data_list[conditions][:]
cleaned_list = []
for i in range(conditions):
cleaned_list.append([])
for n, i in enumerate(data_copy[0:conditions]):
for j in data_copy[n]:
cleaned_list[n] = [k for k in data_copy[n]
if isinstance(k[0],str) is False
and isinstance(k[1],str) is False
and k[0] != 0 and k[1] != 0]
cleaned_list.append(conds)
return cleaned_list
def Plot_ellipses(X_width, Y_width, **kwargs):
fig = plt.figure(figsize = (5, 5), dpi = 500)
a = X_width
b = Y_width
if X_width > Y_width:
ecc = np.sqrt(X_width**2 - Y_width**2) / X_width
if X_width < Y_width:
ecc = np.sqrt(Y_width**2 - X_width**2) / Y_width
if X_width == Y_width:
ecc = np.sqrt(X_width**2 - Y_width**2) / X_width
""" -TODO: Implement eccentricity variable so that you can specify ecc"""
if 'ecc' in kwargs:
ecc = kwargs['ecc']
X_width = 1
Y_width = 1
t = np.linspace(0, 2*np.pi, 1000)
x = a * np.cos(t)
y = b * np.sin(t)
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.axis('off')
plt.text(.75, -.01, "Ecc = {}".format(np.round(ecc, 3)), transform = ax.transAxes)
plt.plot(x, y)
plt.show()
def Compute_ellipse(X_width, Y_width, plot = 0):
""" Computes the eccentricity, area, and perimiter of ellipse given X and Y dims.
(x - c₁)² / a² + (y - c₂)² / b² = 1, where....:
- (x, y) are the variables - the coordinates of an arbitrary point on the ellipse;
- (c₁, c₂) are the coordinates of the ellipse's center;
- a is the distance between the center and the ellipse's vertex, lying on the horizontal axis;
- b is the distance between the center and the ellipse's vertex, lying on the vertical axis.
c₁ and c₂ are assumed to be 0, 0, meaning ellipses are centered.
Returns
-------
X_dim: Vis ang (°)
Y_dim: Vis ang (°)
Eccentricity: Scale from 0 = Circle, 1 = basically flat
Area: Divided by stim_width_visang (so, mm) --> not currently true
"""
X_dim = X_width
Y_dim = Y_width
if X_width > Y_width:
ecc = np.sqrt(X_width**2 - Y_width**2) / X_width
if X_width < Y_width:
ecc = np.sqrt(Y_width**2 - X_width**2) / Y_width
if X_width == Y_width:
ecc = np.sqrt(X_width**2 - Y_width**2) / X_width
# area = np.sqrt((np.pi * X_width * Y_width)) #Area of ellipses: Area = Pi * A * B
area = (np.pi * X_dim/2 * Y_dim/2) #Area of ellipses: Area = Pi * A * B
# perim = np.pi * (X_width + Y_width) * (1 + 3 *(X_width - Y_width)**2 / (10 + np.sqrt((4 - 3* X_width - Y_width)**2 / (X_width + Y_width)**2))) #Ramanujan approximation
if plot == 1:
Plot_ellipses(X_width, Y_width)
return X_dim, Y_dim, ecc, area #, area, perim
def RF_ellipses_list(two_dim_RF_list, conditions = 4):
RF_list = two_dim_RF_list
ellipse_list = []
for i in range(conditions):
ellipse_list.append([])
for n, i in enumerate(RF_list[:conditions]):
for j in RF_list[n]:
Ellipse_data = Compute_ellipse(j[0], j[1])
ellipse_list[n].append(Ellipse_data)
conds = two_dim_RF_list[conditions][:]
ellipse_list.append(conds)
return ellipse_list
def List_ellipse_params(ellipse_list, conditions = 4, get_avg = 0):
all_Xs = []
all_Ys = []
all_eccs = []
all_areas = []
for i in ellipse_list[:conditions]:
cond_x = []
cond_y = []
cond_ecc = []
cond_area = []
for n, j in enumerate(i):
cond_x.append(j[0])
cond_y.append(j[1])
cond_ecc.append(j[2])
cond_area.append(j[3])
if j == i[-1]:
all_Xs.append(cond_x)
all_Ys.append(cond_y)
all_eccs.append(cond_ecc)
all_areas.append(cond_area)
if get_avg == 1:
avg_Xs = np.empty((conditions,1))
avg_Ys = np.empty((conditions,1))
avg_eccs = np.empty((conditions,1))
avg_areas = np.empty((conditions,1))
for n, i in enumerate(all_Xs):
avg_Xs[n] = np.average(i)
for m, j in enumerate(all_Ys):
avg_Ys[m] = np.average(j)
for l, k in enumerate(all_eccs):
avg_eccs[l] = np.average(k)
for k, l in enumerate(all_areas):
avg_areas[k] = np.average(l)
return avg_Xs, avg_Ys, avg_eccs
else:
return all_Xs, all_Ys, all_eccs, all_areas
def ellipse_param_dfs(RF_ellipses):
All_Xs = List_ellipse_params(RF_ellipses)[0]
All_Ys = List_ellipse_params(RF_ellipses)[1]
All_eccs = List_ellipse_params(RF_ellipses)[2]
All_areas = List_ellipse_params(RF_ellipses)[3]
All_Xs_df = | pd.DataFrame(All_Xs) | pandas.DataFrame |
import pandas as pd
import numpy as np
from _datetime import datetime
class AttendanceSystem:
def __init__(self, path_to_internal_employees_file, path_to_attendance_log):
self.clock = Clock(path_to_internal_employees_file, path_to_attendance_log)
# add method - read csv, write csv
self.report = Report(path_to_internal_employees_file, path_to_attendance_log)
# class Helper
# add method - read csv, write csv
#
def start(self, user_id):
# CHECK if user_id is None
# start menu, being called after validation
proceed = True
while proceed:
option = int(
input("Enter the number of action you wish to do:\n1) Add employee manually\n2) Delete employee "
"manually\n3) Add employees from file\n4) Delete employees from file\n5) Mark attendance\n6) Generate "
"attnedance report for an employee\n7) Print monthly report for all employees\n8) Print report for "
"all late employees\nYour choice: "))
# method that takes a dict (keys:options,val:methods)
# 25-27 : method
if option == 1:
self.report.add_employee()
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if option == 2:
self.report.delete_employee()
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if option == 3:
self.report.add_employees_from_external_file()
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if option == 4:
self.report.delete_employees_from_external_file()
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if option == 5:
clock_option = int(
input("Enter the number of action you wish to do:\n1) Clock in\n2) Clock out\nYour answer: "))
if clock_option == 1:
datetime = self.clock.get_date()
attendance_log_df = pd.read_csv(self.clock.path_to_attendance_log, dtype=str)
data = [{'employee_id': user_id, 'date_time_in': datetime}]
dict_df = pd.DataFrame.from_dict(data)
updated_attendance_log_df = attendance_log_df.append(dict_df, sort=False)
print(updated_attendance_log_df)
updated_attendance_log_df.to_csv(self.clock.path_to_attendance_log, mode='w', index=False)
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if clock_option == 2:
# column- datetime, column= in\out
datetime = self.clock.get_date()
attendance_log_df = pd.read_csv(self.clock.path_to_attendance_log, dtype=str)
data = [{'date_time_out': datetime}]
dict_df = pd.DataFrame.from_dict(data)
updated_attendance_log_df = attendance_log_df.append(dict_df, sort=False)
print(updated_attendance_log_df)
updated_attendance_log_df.to_csv(self.clock.path_to_attendance_log, mode='w', index=False)
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
def is_valid(self):
restart = True
user_id = None
# validating that the user is one of the employees of the company
while restart:
internal_employees_file_df = pd.read_csv(self.report.path_to_internal_employees_file, dtype=str)
print(internal_employees_file_df)
print("Welcome to Employee Attendance Management System.")
user_id = str(input("Please enter your ID: "))
user_name = str(input("Please enter your full name: "))
# TODO: check validity
input_df = pd.DataFrame(data=[[user_id, user_name]], columns=["user_id", "user_name"])
is_id_valid = internal_employees_file_df['employee_id'].isin(input_df['user_id'])
is_name_valid = internal_employees_file_df['employee_name'].isin(input_df['user_name'])
if is_id_valid.any() and is_name_valid.any():
restart = False
print("you are valid")
else:
print("try again")
if not restart:
self.start(user_id)
class Clock:
def __init__(self, path_to_internal_employees_file, path_to_attendance_log):
self.path_to_internal_employees_file = path_to_internal_employees_file
self.path_to_attendance_log = path_to_attendance_log
def get_date(self):
return datetime.now().isoformat(' ', 'seconds')
class Report:
def __init__(self, path_to_internal_employees_file, path_to_attendance_log):
self.path_to_internal_employees_file = path_to_internal_employees_file
self.path_to_attendance_log = path_to_attendance_log
def add_employee(self):
# put in a dict
new_employee_id = str(input("Please enter employee's ID: "))
new_employee_name = str(input("Please enter employee's full name: "))
new_employee_phone = str(input("Please enter employee's phone: "))
new_employee_age = str(input("Please enter employee's age: "))
input_df = pd.DataFrame(data=[[new_employee_id, new_employee_name, new_employee_phone, new_employee_age]],
columns=["employee_id", "employee_name", "employee_phone", "employee_age"])
input_df.to_csv(self.path_to_internal_employees_file, mode='a', header=False, index=False)
def delete_employee(self):
deleted_employee_id = str(input("Please enter employee's ID: "))
internal_employees_file_df = pd.read_csv(self.path_to_internal_employees_file, dtype=str)
internal_employees_file_df.drop(
internal_employees_file_df.loc[internal_employees_file_df['employee_id'] == deleted_employee_id].index,
inplace=True)
print("The new employees file after deleting:\n")
print(internal_employees_file_df)
internal_employees_file_df.to_csv(self.path_to_internal_employees_file, mode='w', index=False)
def add_employees_from_external_file(self):
external_file_path = str(input("Please enter file path: "))
external_employees_file = pd.read_csv(external_file_path, dtype=str)
external_employees_file.to_csv(self.path_to_internal_employees_file, mode='a', header=False, index=False)
def delete_employees_from_external_file(self):
external_file_path = str(input("Please enter file path: "))
external_employees_to_delete_df = pd.read_csv(external_file_path, dtype=str)
print(external_employees_to_delete_df)
internal_employees_file_df = | pd.read_csv(self.path_to_internal_employees_file, dtype=str) | pandas.read_csv |
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by <NAME> as part of PyDTA.
It has been extended and improved by <NAME> from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from typing import (
Any,
AnyStr,
Hashable,
Sequence,
cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas._typing import (
Buffer,
CompressionOptions,
FilePathOrBuffer,
StorageOptions,
)
from pandas.util._decorators import (
Appender,
doc,
)
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
)
from pandas import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
concat,
isna,
to_datetime,
to_timedelta,
)
from pandas.core import generic
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas.io.common import get_handle
_version_error = (
"Version of given Stata file is {version}. pandas supports importing "
"versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
"and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
Convert date variables to DataFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_compression_params = f"""\
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {{'infer',
'gzip', 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{{'zip', 'gzip', 'bz2'}}, or inferred as one of the above,
other entries passed as additional compression options.
{generic._shared_docs["storage_options"]}"""
_iterator_params = """\
iterator : bool, default False
Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
{_compression_params}
Returns
-------
DataFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Creating a dummy stata for this example
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta')
Read a Stata dta file:
>>> df = pd.read_stata('animals.dta')
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8")
>>> df = pd.DataFrame(values, columns=["i"])
>>> df.to_stata('filename.dta')
>>> itr = pd.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
... # Operate on a single chunk, e.g., chunk.mean()
... pass
>>> import os
>>> os.remove("./filename.dta")
>>> os.remove("./animals.dta")
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
DataFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
implementing a binary read() functions.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_compression_params}
{_reader_notes}
"""
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
# mypy doesn't understand that a Series and an int can be combined using mathematical
# operations. (+, -).
def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month) -> Series:
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format="%Y%m")
else:
index = getattr(year, "index", None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days) -> Series:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format="%Y") + | to_timedelta(days, unit="d") | pandas.to_timedelta |
#### structure / guide ####
# imports
# functions
## fetch_daily_data --> api call for all data (good for automated pipeline)
## extract_data --> extraneous
## read_data --> read pre-saved parquet (good for ad-hoc usage)
## created_merged_df -->
## create_plot -->
## correlation_plot -->
## table_plot -->
## rolling_correlation -->
## create_holdings_portfolio -->
## bat_acquisition -->
## calc_hold_only_roi -->
## roi_plot -->
## series_to_supervised --> ##### predict the price of BAT (relative to USD) # https://machinelearningmastery.com/xgboost-for-time-series-forecasting/
## train_test_split -->
## expanding_window_validation --> ##### expanding-window validation
## xgboost_forecast --> #### fit an xgboost model and make a one step prediction
## xgBoost_model -->
## trading_portfolio -->
# main
## parameters
## processing
### ad-hoc
### pipeline/data update
## graphs
# First import the libraries that we need to use
import datetime
import json
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
import plotly.figure_factory as ff
import requests
from sklearn.metrics import mean_absolute_error
from xgboost import XGBRegressor
def fetch_daily_data(symbol, start, end, api_call_limit=300):
# calculate number of iterations
df = pd.DataFrame(columns=["one", "two"])
df.one = [start]
df.one = pd.to_datetime(df.one)
df.two = [end]
df.two = pd.to_datetime(df.two)
difference = (df.two - df.one)
difference = difference.astype('timedelta64[D]')
iterations = difference/api_call_limit
iterations = math.ceil(iterations)
full_df = pd.DataFrame()
pair_split = symbol.split('/') # symbol must be in format XXX/XXX e.g., BTC/EUR
symbol = pair_split[0] + '-' + pair_split[1]
final_end = end
for i in range(iterations):
# update start + end
start = pd.to_datetime(start)
end = start + datetime.timedelta(days=api_call_limit)
start = start.strftime("%Y-%m-%d")
end = end.strftime("%Y-%m-%d")
if i == (iterations - 1):
end = final_end
# extract data
url = f'https://api.pro.coinbase.com/products/{symbol}/candles?granularity=86400&start={start}&end={end}'
response = requests.get(url)
if response.status_code == 200: # check to make sure the response from server is good
data = pd.DataFrame(json.loads(response.text), columns=['unix', 'low', 'high', 'open', 'close', 'volume'])
data['date'] = pd.to_datetime(data['unix'], unit='s') # convert to a readable date
# data['vol_fiat'] = data['volume'] * data['close'] # multiply the BTC volume by closing price to approximate fiat volume
# if we failed to get any data, print an error...otherwise write the file
if data is None:
print("Did not return any data from Coinbase for this symbol")
else:
full_df = pd.concat([full_df, data])
else:
print(response.status_code)
print(response.text)
print("Did not receieve OK response from Coinbase API")
print("iteration #: " + str((iterations+1)))
#move start forward
start = pd.to_datetime(start)
start = start + datetime.timedelta(days=api_call_limit)
start = start.strftime("%Y-%m-%d")
# save full dataframe
full_df.to_csv(f'data/Coinbase_{pair_split[0] + pair_split[1]}_dailydata.csv', index=False)
full_df.to_parquet(f'data/{pair_split[0]}-{pair_split[1]}.parquet', index=False)
return full_df
def extract_data(token_pair, start, end):
dt = fetch_daily_data(symbol=token_pair, start=start, end=end)
return dt
def read_data(file_name, cols=['date','close','volume']):
dt = pd.read_parquet('data/' + file_name)
dt = dt[cols]
dt['date'] = pd.to_datetime(dt.date)
dt = dt.sort_values(['date'])
return dt
def create_merged_df(BAT_BTC, BAT_USD, BTC_USD, col):
BAT_BTC = BAT_BTC[['date',col]]
BAT_USD = BAT_USD[['date',col]]
BTC_USD = BTC_USD[['date',col]]
BAT_BTC = BAT_BTC.rename(columns={col: "BAT_BTC"})
BAT_USD = BAT_USD.rename(columns={col: "BAT_USD"})
BTC_USD = BTC_USD.rename(columns={col: "BTC_USD"})
close_df = BAT_BTC.merge(BAT_USD, on='date')
close_df = close_df.merge(BTC_USD, on='date')
close_df.set_index('date',inplace=True)
close_df = close_df.fillna(method="ffill") # forward fill two missing BAT/BTC values
return close_df
def create_plot(df, cols, plt_show=False, plt_save=False, png_name='plot.png'):
fig, ax = plt.subplots()
plt.plot(df[cols])
plt.title(str(cols[0]) + ' Price Plot')
plt.xlabel('Date')
plt.ylabel('Price')
ax.xaxis_date()
fig.autofmt_xdate()
fmt = '${x:,.2f}'
tick = mtick.StrMethodFormatter(fmt)
ax.yaxis.set_major_formatter(tick)
if plt_show == True:
plt.show()
if plt_save == True:
plt.savefig(png_name)
def correlation_plot(df, plt_show=False, plt_save=False, png_name='plot.png'):
fig, ax = plt.subplots()
plt.plot(df)
plt.title('Correlation Plot')
plt.xlabel('Date')
ax.xaxis_date()
fig.autofmt_xdate()
plt.ylabel('Correlation')
plt.legend(['BAT/USD & BAT/BTC','BAT/USD & BTC/USD','BAT/BTC & BTC/USD'])
if plt_show == True:
plt.show()
if plt_save == True:
plt.savefig(png_name)
def table_plot(df, plt_show=False, plt_save=False, png_name='table_plot.png'): # https://stackoverflow.com/questions/19726663/how-to-save-the-pandas-dataframe-series-data-as-a-figure
fig = ff.create_table(close_corr_table, index=True)
fig.update_layout(
autosize=False,
width=500,
height=200,
font={'size':8})
if plt_show == True:
fig.show()
if plt_save == True:
fig.write_image(png_name, scale=2)
def rolling_correlation(df):
roll_bat = df['BAT_USD'].rolling(180).corr(df['BAT_BTC'])
roll_bat.name = 'roll_bat'
roll_usd = df['BAT_USD'].rolling(180).corr(df['BTC_USD'])
roll_usd.name = 'roll_usd'
roll_bat_btc = df['BAT_BTC'].rolling(180).corr(df['BTC_USD'])
roll_bat_btc.name = 'roll_bat_btc'
roll_df = pd.concat([roll_bat, roll_usd, roll_bat_btc], axis=1)
return roll_df
def create_holdings_portfolio(start_date, today, hold_only_columns = ['BAT','USD','BTC']):
index = pd.date_range(start_date, today)
port_df = pd.DataFrame(index=index,columns=hold_only_columns)
trading_dates = | pd.date_range(start_date, today, freq='1M') | pandas.date_range |
import os
import re
import pandas as pd
#you only need to provide result.txt from yolo output
# with open("coco_paper_names.txt") as f:
with open("coco_class_list.txt") as f:
# with open("bdd_names_list.txt") as f:
obj_list = f.readlines()
## remove whitespace characters like `\n` at the end of each line
obj_list = [x.strip() for x in obj_list]
# IN_FILE = '/home/mayank_s/codebase/cplus_plus/ai/darknet_AlexeyAB/mank_result/result_yolo_v4.txt'
# IN_FILE = '/home/mayank_s/codebase/cplus_plus/ai/darknet_AlexeyAB/mank_result/result_yolo_v3.txt'
# IN_FILE = '/home/mayank_s/codebase/cplus_plus/ai/darknet_AlexeyAB/mank_result/result_gaussian_bdd.txt'
# IN_FILE = '/home/mayank_s/codebase/cplus_plus/ai/darknet_AlexeyAB/result_yolo_512_v3.txt'
IN_FILE = '/home/mayank_s/codebase/cplus_plus/ai/darknet_AlexeyAB/mank_result/yolo_v4_coco_412.txt'
SEPARATOR_KEY = 'Enter Image Path:'
IMG_FORMAT = '.jpg'
bblabel=[]
outfile = None
flag=False
with open(IN_FILE) as infile:
for line in infile:
if SEPARATOR_KEY in line:
if IMG_FORMAT not in line:
break
# get text between two substrings (SEPARATOR_KEY and IMG_FORMAT)
image_path = re.search(SEPARATOR_KEY + '(.*)' + IMG_FORMAT, line)
# get the image name (the final component of a image_path)
# e.g., from 'data/horses_1' to 'horses_1'
image_name = os.path.basename(image_path.group(1))
#############################3333
file_name=image_name.split('COCO_val2014_')[-1]
# file_name="00100200"
# b = [(lambda x: x.strip('0') if isinstance(x, str) and len(x) != 1 else x)(x) for x in file_name]
#
# trailing_removed = [s.rstrip("0") for s in file_name]
# leading_removed = [s.lstrip("0") for s in file_name]
# both_removed = [s.strip("0") for s in file_name]
file_name=file_name.lstrip("0")
####################################
# file_name=image_name
flag=True
elif flag:
# split line on first occurrence of the character ':' and '%'
class_name, info = line.split(':', 1)
# if class_name.split(" "):
# if len(class_name.split(" "))>1:
# print(class_name)
# class_name=class_name.replace(" ", "_")
# print(class_name)
confidence, bbox = info.split('%', 1)
# get all the coordinates of the bounding box
bbox = bbox.replace(')','') # remove the character ')'
# go through each of the parts of the string and check if it is a digit
left, top, width, height = [int(s) for s in bbox.split() if s.lstrip('-').isdigit()]
right = left + width
bottom = top + height
# outfile.write("{} {} {} {} {} {}\n".format(class_name, float(confidence)/100, left, top, right, bottom))
conf=float(confidence)/100
obj_name=class_name
print(obj_name)
obj_id=obj_list.index(obj_name)
# obj_id = obj['category_id']
img_width = img_height = 0
data_label = [file_name, img_width, img_height, obj_name, obj_id, left, top, right, bottom, conf]
bblabel.append(data_label)
columns = ['filename', 'width', 'height', 'class', 'obj_category', 'xmin', 'ymin', 'xmax', 'ymax', 'conf']
df = | pd.DataFrame(bblabel, columns=columns) | pandas.DataFrame |
"""
Computational Cancer Analysis Library
Authors:
Huwate (Kwat) Yeerna (Medetgul-Ernar)
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
<NAME>
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
"""
from os.path import isfile
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.cm import bwr, gist_rainbow
from matplotlib.colorbar import ColorbarBase, make_axes
from matplotlib.colors import (ColorConverter, LinearSegmentedColormap,
ListedColormap, Normalize)
from matplotlib.gridspec import GridSpec
from matplotlib.pyplot import (figure, gca, savefig, sca, subplot, suptitle,
tight_layout)
from numpy import array, unique
from pandas import DataFrame, Series, isnull
from seaborn import (barplot, boxplot, clustermap, despine, distplot, heatmap,
set_style, violinplot)
from .d2 import get_dendrogram_leaf_indices, normalize_2d_or_1d
from .file import establish_filepath
# ==============================================================================
# Style
# ==============================================================================
FIGURE_SIZE = (16, 10)
SPACING = 0.05
FONT_LARGEST = {'fontsize': 24, 'weight': 'bold', 'color': '#220530'}
FONT_LARGER = {'fontsize': 20, 'weight': 'bold', 'color': '#220530'}
FONT_STANDARD = {'fontsize': 16, 'weight': 'bold', 'color': '#220530'}
FONT_SMALLER = {'fontsize': 12, 'weight': 'bold', 'color': '#220530'}
# Color maps
C_BAD = 'wheat'
# Continuous 1
CMAP_CONTINUOUS = bwr
CMAP_CONTINUOUS.set_bad(C_BAD)
# Continuous 2
reds = [0.26, 0.26, 0.26, 0.39, 0.69, 1, 1, 1, 1, 1, 1]
greens_half = [0.26, 0.16, 0.09, 0.26, 0.69]
colordict = {
'red':
tuple([(0.1 * i, r, r) for i, r in enumerate(reds)]),
'green':
tuple([
(0.1 * i, r, r)
for i, r in enumerate(greens_half + [1] + list(reversed(greens_half)))
]),
'blue':
tuple([(0.1 * i, r, r) for i, r in enumerate(reversed(reds))])
}
CMAP_CONTINUOUS_ASSOCIATION = LinearSegmentedColormap('association', colordict)
CMAP_CONTINUOUS_ASSOCIATION.set_bad(C_BAD)
# Categorical
CMAP_CATEGORICAL = gist_rainbow
CMAP_CATEGORICAL.set_bad(C_BAD)
# Binary
CMAP_BINARY = ListedColormap(['#cdcdcd', '#404040'])
CMAP_BINARY.set_bad(C_BAD)
DPI = 300
# ==============================================================================
# Functions
# ==============================================================================
def plot_points(*args,
title='',
xlabel='',
ylabel='',
filepath=None,
file_extension='pdf',
dpi=DPI,
ax=None,
**kwargs):
"""
:param args:
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
if not ax:
figure(figsize=FIGURE_SIZE)
ax = gca()
if 'linestyle' not in kwargs:
kwargs['linestyle'] = ''
if 'marker' not in kwargs:
kwargs['marker'] = '.'
ax.plot(*args, **kwargs)
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_distribution(a,
bins=None,
hist=True,
kde=True,
rug=False,
fit=None,
hist_kws=None,
kde_kws=None,
rug_kws=None,
fit_kws=None,
color=None,
vertical=False,
norm_hist=False,
axlabel=None,
label=None,
ax=None,
title='',
xlabel='',
ylabel='Frequency',
filepath=None,
file_extension='pdf',
dpi=DPI):
"""
:param a:
:param bins:
:param hist:
:param kde:
:param rug:
:param fit:
:param hist_kws:
:param kde_kws:
:param rug_kws:
:param fit_kws:
:param color:
:param vertical:
:param norm_hist:
:param axlabel:
:param label:
:param ax:
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:return: None
"""
if not ax:
figure(figsize=FIGURE_SIZE)
distplot(
a,
bins=bins,
hist=hist,
kde=kde,
rug=rug,
fit=fit,
hist_kws=hist_kws,
kde_kws=kde_kws,
rug_kws=rug_kws,
fit_kws=fit_kws,
color=color,
vertical=vertical,
norm_hist=norm_hist,
axlabel=axlabel,
label=label,
ax=ax)
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_violin_box_or_bar(x=None,
y=None,
hue=None,
data=None,
order=None,
hue_order=None,
bw='scott',
cut=2,
scale='count',
scale_hue=True,
gridsize=100,
width=0.8,
inner='quartile',
split=False,
orient=None,
linewidth=None,
color=None,
palette=None,
saturation=0.75,
ax=None,
fliersize=5,
whis=1.5,
notch=False,
ci=95,
n_boot=1000,
units=None,
errcolor='0.26',
errwidth=None,
capsize=None,
violin_or_box='violin',
colors=(),
figure_size=FIGURE_SIZE,
title=None,
xlabel=None,
ylabel=None,
filepath=None,
file_extension='pdf',
dpi=DPI,
**kwargs):
"""
Plot violin plot.
:param x:
:param y:
:param hue:
:param data:
:param order:
:param hue_order:
:param bw:
:param cut:
:param scale:
:param scale_hue:
:param gridsize:
:param width:
:param inner:
:param split:
:param orient:
:param linewidth:
:param color:
:param palette:
:param saturation:
:param ax:
:param fliersize:
:param whis:
:param notch:
:param ci:
:param n_boot:
:param units:
:param errcolor:
:param errwidth:
:param capsize:
:param violin_or_box:
:param colors: iterable;
:param figure_size: tuple;
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
# Initialize a figure
if not ax:
figure(figsize=figure_size)
if isinstance(x, str):
x = data[x]
if isinstance(y, str):
y = data[y]
if not palette:
palette = assign_colors_to_states(x, colors=colors)
if len(set([v for v in y
if v and ~isnull(v)])) <= 2: # Use barplot for binary
barplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
ci=ci,
n_boot=n_boot,
units=units,
orient=orient,
color=color,
palette=palette,
saturation=saturation,
errcolor=errcolor,
ax=ax,
errwidth=errwidth,
capsize=capsize,
**kwargs)
else: # Use violin or box plot for continuous or categorical
if violin_or_box == 'violin':
violinplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
bw=bw,
cut=cut,
scale=scale,
scale_hue=scale_hue,
gridsize=gridsize,
width=width,
inner=inner,
split=split,
orient=orient,
linewidth=linewidth,
color=color,
palette=palette,
saturation=saturation,
ax=ax,
**kwargs)
elif violin_or_box == 'box':
boxplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
orient=orient,
color=color,
palette=palette,
saturation=saturation,
width=width,
fliersize=fliersize,
linewidth=linewidth,
whis=whis,
notch=notch,
ax=ax,
**kwargs)
else:
raise ValueError(
'\'violin_or_box\' must be either \'violin\' or \'box\'.')
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_heatmap(dataframe,
vmin=None,
vmax=None,
cmap=None,
center=None,
robust=False,
annot=None,
fmt='.2g',
annot_kws=None,
linewidths=0,
linecolor='white',
cbar=False,
cbar_kws=None,
cbar_ax=None,
square=False,
xticklabels=False,
yticklabels=False,
mask=None,
figure_size=FIGURE_SIZE,
data_type='continuous',
normalization_method=None,
normalization_axis=0,
max_std=3,
axis_to_sort=None,
cluster=False,
row_annotation=(),
column_annotation=(),
annotation_colors=(),
title=None,
xlabel=None,
ylabel=None,
xlabel_rotation=0,
ylabel_rotation=90,
xtick_rotation=90,
ytick_rotation=0,
filepath=None,
file_extension='pdf',
dpi=DPI,
**kwargs):
"""
Plot heatmap.
:param dataframe:
:param vmin:
:param vmax:
:param cmap:
:param center:
:param robust:
:param annot:
:param fmt:
:param annot_kws:
:param linewidths:
:param linecolor:
:param cbar:
:param cbar_kws:
:param cbar_ax:
:param square:
:param xticklabels:
:param yticklabels:
:param mask:
:param figure_size:
:param data_type:
:param normalization_method:
:param normalization_axis:
:param max_std:
:param axis_to_sort:
:param cluster:
:param row_annotation:
:param column_annotation:
:param annotation_colors: list; a list of matplotlib color specifications
:param title:
:param xlabel:
:param ylabel:
:param xlabel_rotation:
:param ylabel_rotation:
:param xtick_rotation:
:param ytick_rotation:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
df = dataframe.copy()
if normalization_method:
df = normalize_2d_or_1d(
df, normalization_method,
axis=normalization_axis).clip(-max_std, max_std)
if len(row_annotation) or len(column_annotation):
if len(row_annotation):
if isinstance(row_annotation, Series):
row_annotation = row_annotation.copy()
if not len(row_annotation.index & df.index): # Series
# but without proper index
row_annotation.index = df.index
else:
row_annotation = Series(row_annotation, index=df.index)
row_annotation.sort_values(inplace=True)
df = df.ix[row_annotation.index, :]
if len(column_annotation):
if isinstance(column_annotation, Series):
column_annotation = column_annotation.copy()
# Series but without proper index
if not len(column_annotation.index & df.columns):
column_annotation.index = df.columns
else:
column_annotation = Series(column_annotation, index=df.columns)
column_annotation.sort_values(inplace=True)
df = df.ix[:, column_annotation.index]
if axis_to_sort in (0, 1):
a = array(df)
a.sort(axis=axis_to_sort)
df = | DataFrame(a, index=df.index) | pandas.DataFrame |
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
    Plot the number of 4s against the runs scored by the batsman
    Description
    This function plots the number of 4s against the total runs scored by the batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 and 100 runs scored is also plotted.
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
    tendulkar = getPlayerData(35320, dir="../", file="tendulkar.csv", type="batting",
                              homeOrAway=[1,2], result=[1,2,4])
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
    # Get number of 4s and runs scored
x4s = pd.to_numeric(df['4s'])
runs = pd.to_numeric(df['Runs'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
    # reshape on the underlying ndarray (a pandas Series has no .reshape method)
    runsPoly = poly.fit_transform(runs.values.reshape(-1, 1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
    b = poly.fit_transform(np.array([[50]]))
    c = linreg.predict(b)
    plt.axhline(y=c[0], color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
    b=poly.fit_transform(np.array([[100]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
df = clean (file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
    This function computes the Average Runs scored on different pitches and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
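    # e.g. the aggregated MultiIndex column ('Runs', 'mean') is flattened to the
    # plain name 'Runs_mean', which is what the barplot below refers to.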
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs at Ground"
plt.xticks(rotation='vertical')
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Ground', y="Runs_mean", data=df1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the Average runs against different opposition played by batsman
Description
This function computes the mean runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGround
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Opposition']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation='vertical')
ax=sns.barplot(x='Opposition', y="Runs_mean", data=df1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarsp = getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost(tendulkarsp,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
won = playersp[playersp['result'] == 1]
lost = playersp[(playersp['result']==2) | (playersp['result']==4)]
won['status']="won"
lost['status']="lost"
# Stack dataframes
df= pd.concat([won,lost])
df['Runs']= pd.to_numeric(df['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=df)
atitle = name + "-" + "- Runs in games won/lost-drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
# retrieve the file path of a data file installed with cricketr
batsmanCumulativeAverageRuns(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
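    # Running mean: the value after innings i is sum(runs[0..i]) / (i + 1); for
    # example, scores of [10, 30, 20] give a cumulative average of [10.0, 20.0, 20.0].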
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=pd.to_numeric(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a 3D Pie Chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanDismissals(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
df = pd.DataFrame(d)
df1=df['Dismissal'].groupby(df['Dismissal']).count()
df2 = pd.DataFrame(df1)
df2.columns=['Count']
df3=df2.reset_index(inplace=False)
# Plot a pie chart
plt.pie(df3['Count'], labels=df3['Dismissal'],autopct='%.1f%%')
atitle = name + "-Pie chart of dismissals"
plt.suptitle(atitle, fontsize=16)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanMeanStrikeRate
# This function plot the Mean Strike Rate of the batsman against Runs scored as a continous graph
#
###########################################################################################
def batsmanMeanStrikeRate(file, name="A Hitter"):
'''
Calculate and plot the Mean Strike Rate of the batsman on total runs scored
Description
This function calculates the Mean Strike Rate of the batsman for each interval of runs scored
Usage
batsmanMeanStrikeRate(file, name = "A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMeanStrikeRate(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs= pd.to_numeric(batsman['Runs'])
# Create the histogram
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
SR=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
        # Filter runs that are between 2 bins
batsman['Runs']=pd.to_numeric(batsman['Runs'])
a=(batsman['Runs'] > bins[i-1]) & (batsman['Runs'] <= bins[i])
df=batsman[a]
SR.append(np.mean(df['SR']))
atitle = name + "-" + "Strike rate in run ranges"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(midBin, SR, alpha=0.5)
plt.plot(midBin, SR,color="r", alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('Strike Rate')
plt.title(atitle)
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanMovingAverage
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Compute a moving average
def movingaverage(interval, window_size):
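    # Boxcar (equal-weight) moving average: each point is the mean of the surrounding
    # window_size values; np.convolve with mode 'same' keeps the output the same length
    # as the input, so it can be plotted against the same dates.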
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
def batsmanMovingAverage(file,name="A Squarecut") :
'''
Calculate and plot the Moving Average of the batsman in his career
Description
This function calculates and plots the Moving Average of the batsman in his career
Usage
batsmanMovingAverage(file,name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMovingAverage(pathToFile,"<NAME>")
'''
# Compute the moving average of the time series
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
date= pd.to_datetime(batsman['Start Date'])
atitle = name + "'s Moving average (Runs)"
    # Plot the runs in grey colour
plt.plot(date,runs,"-",color = '0.75')
# Compute and plot moving average
y_av = movingaverage(runs, 50)
plt.xlabel('Date')
plt.ylabel('Runs')
plt.plot(date, y_av,"b")
plt.title(atitle)
plt.text('2002-01-03',150,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanPerfBoxHist
# This function makes a box plot showing the mean, median and the 25th & 75th percentile runs. The
# histogram shows the frequency of scoring runs in different run ranges
#
###########################################################################################
# Plot the batting performance as a combined box plot and histogram
def batsmanPerfBoxHist(file, name="A Hitter"):
'''
Make a boxplot and a histogram of the runs scored by the batsman
Description
Make a boxplot and histogram of the runs scored by the batsman. Plot the Mean, Median, 25th and 75th quantile
Usage
batsmanPerfBoxHist(file, name="A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsman4s(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
plt.subplot(2,1,1)
sns.boxplot(batsman['Runs'])
plt.subplot(2,1,2);
atitle = name + "'s" + " - Runs Frequency vs Runs"
plt.hist(batsman['Runs'],bins=20, edgecolor='black')
plt.xlabel('Runs')
    plt.ylabel('Frequency')
plt.title(atitle,size=16)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from statsmodels.tsa.arima_model import ARIMA
import pandas as pd
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: batsmanPerfForecast
# This function forecasts the batsmans performance based on past performance -
# To update
###########################################################################################
def batsmanPerfForecast(file, name="A Squarecut"):
'''
# To do: Currently ARIMA is used.
Forecast the batting performance based on past performances using Holt-Winters forecasting
Description
This function forecasts the performance of the batsman based on past performances using HoltWinters forecasting model
Usage
batsmanPerfForecast(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanPerfForecast(pathToFile,"Sachin Tendulkar")
# Note: The above example uses the file tendulkar.csv from the /data directory. However
# you can use any directory as long as the data file exists in that directory.
# The general format is pkg-function(pathToFile,par1,...)
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=batsman['Runs'].astype('float')
# Fit a ARIMA model
date= pd.to_datetime(batsman['Start Date'])
df=pd.DataFrame({'date':date,'runs':runs})
df1=df.set_index('date')
model = ARIMA(df1, order=(5,1,0))
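    # order=(5,1,0) means 5 autoregressive lags, first-order differencing and no
    # moving-average terms; this is a starting point rather than a tuned model.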
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
plt.gcf().clear()
print(residuals.describe())
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanPerfHomeAway
# This plots the batsman's performance in home versus abroad
#
###########################################################################################
def batsmanPerfHomeAway(file,name="A Hitter"):
'''
This function analyses the performance of the batsman at home and overseas
Description
This function plots the runs scored by the batsman at home and overseas
Usage
batsmanPerfHomeAway(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist bowlerContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarSp <-getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanPerfHomeAway(pathToFile,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create separate DFs for home and away
home = playersp[playersp['ha'] == 1]
away = playersp[playersp['ha']==2]
home['venue']="Home"
away['venue']="Overseas"
df= pd.concat([home,away])
df['Runs']= pd.to_numeric(df['Runs'])
atitle = name + "-" + "- - Runs-Home & overseas"
ax = sns.boxplot(x='venue',y='Runs',data=df)
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 30 Jun 2015
# Function: batsmanRunsFreqPerf
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Plot the performance of the batsman as a continous graph
# Create a performance plot between Runs and RunsFrequency
def batsmanRunsFreqPerf(file, name="A Hookshot"):
'''
    Calculate run frequencies in ranges of 10 runs and plot them against the runs scored by the batsman
Description
This function calculates frequencies of runs in 10 run buckets and plots this percentage
Usage
batsmanRunsFreqPerf(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsFreqPerf(pathToFile,"Sachin Tendulkar")
'''
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(df['Runs'])
# Plot histogram
runs.plot.hist(grid=True, bins=20, rwidth=0.9, color='#607c8e')
atitle = name + "'s" + " Runs histogram"
plt.title(atitle)
plt.xlabel('Runs')
plt.grid(axis='y', alpha=0.75)
plt.text(180, 90,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanRunsLikelihood
# This function used K-Means to compute and plot the runs likelihood for the batsman
# To do - Include scatterplot
###########################################################################################
def batsmanRunsLikelihood(file, name="A Squarecut") :
'''
This function uses K-Means to determine the likelihood of the batsman to get runs
Description
    This function uses K-Means to estimate the likelihood of scoring runs, based on clusters of runs the batsman has made in the past.
Usage
batsmanRunsLikelihood(file, name = "A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict battingPerf3d batsmanContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsLikelihood(pathToFile,"<NAME>")
'''
batsman =clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
data = batsman[['Runs','BF','Mins']]
# Create 3 different clusters
kmeans = KMeans(n_clusters=3,max_iter=300)
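    # Three clusters roughly separate low-, medium- and high-scoring innings; each
    # centroid is a typical combination of runs, balls faced and minutes at the crease.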
# Compute the clusters
kmeans.fit(data)
y_kmeans = kmeans.predict(data)
# Get the cluster centroids
centers = kmeans.cluster_centers_
centers
# Add a title
atitle= name + '-' + "Runs Likelihood"
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Draw vertical line 1st centroid
x=[centers[0][0],centers[0][0]]
y=[centers[0][1],centers[0][1]]
z=[0,centers[0][2]]
ax.plot(x,y,z,'k-',color='r',alpha=0.8, linewidth=2)
# Draw vertical line 2nd centroid
x=[centers[1][0],centers[1][0]]
y=[centers[1][1],centers[1][1]]
z=[0,centers[1][2]]
ax.plot(x,y,z,'k-',color='b',alpha=0.8, linewidth=2)
    # Draw vertical line 3rd centroid
x=[centers[2][0],centers[2][0]]
y=[centers[2][1],centers[2][1]]
z=[0,centers[2][2]]
ax.plot(x,y,z,'k-',color='k',alpha=0.8, linewidth=2)
ax.set_xlabel('BallsFaced')
ax.set_ylabel('Minutes')
ax.set_zlabel('Runs');
plt.title(atitle)
plt.show()
plt.gcf().clear()
return
from sklearn.linear_model import LinearRegression
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanRunsPredict
# This function predicts the runs that will be scored by the batsman for a given numbers
# of balls faced and minutes at crease
#
###########################################################################################
def batsmanRunsPredict(file, newDF, name="A Coverdrive"):
'''
Predict the runs for the batsman given the Balls Faced and Minutes in crease
Description
Fit a linear regression plane between Runs scored and Minutes in Crease and Balls Faced. This will be used to predict the batsman runs for time in crease and balls faced
Usage
    batsmanRunsPredict(file, newDF, name="A Coverdrive")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
newdataframe
This is a data frame with 2 columns BF(Balls Faced) and Mins(Minutes)
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns a data frame with the predicted runs for the Balls Faced and Minutes at crease
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage battingPerf3d batsmanContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting",
# homeOrAway=c(1,2), result=c(1,2,4))
# Use a single value for BF and Mins
BF= 30
Mins= 20
# retrieve the file path of a data file installed with cricketr
pathToFile <- system.file("data", "tendulkar.csv", package = "cricketr")
batsmanRunsPredict(pathToFile,"<NAME>",newdataframe=data.frame(BF,Mins))
#or give a data frame
BF = np.linspace( 10, 400,15)
Mins = np.linspace(30,220,15)
    newDF= pd.DataFrame({'BF':BF,'Mins':Mins})
#values <- batsmanRunsPredict("../cricketr/data/tendulkar.csv","<NAME>",
#print(values)
'''
batsman = clean(file)
df=batsman[['BF','Mins','Runs']]
df['BF']=pd.to_numeric(df['BF'])
df['Runs']=pd.to_numeric(df['Runs'])
xtrain=df.iloc[:,0:2]
ytrain=df.iloc[:,2]
linreg = LinearRegression().fit(xtrain, ytrain)
newDF['Runs']=linreg.predict(newDF)
return(newDF)
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanRunsRanges
# This plots the percentage runs in different run ranges
#
###########################################################################################
def batsmanRunsRanges(file, name= "A Hookshot") :
'''
Compute and plot a histogram of the runs scored in ranges of 10
Description
Compute and plot a histogram of the runs scored in ranges of 10
Usage
batsmanRunsRanges(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsRanges(pathToFile,"<NAME>")
'''
# Clean file
batsman = clean(file)
runs= pd.to_numeric(batsman['Runs'])
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Compute binWidth. Subtract '2' to separate the bars
binWidth=bins[1]-bins[0]-2
# Plot a barplot
    plt.bar(midBin, hist, binWidth, color="blue")
plt.xlabel('Run ranges')
plt.ylabel('Frequency')
# Add a title
atitle= name + '-' + "Runs % vs Run frequencies"
plt.title(atitle)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.linear_model import LinearRegression
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: battingPerf3d
# This function creates a 3D scatter plot of Runs scored vs Balls Faced and Minutes in crease.
# A regression plane is fitted to this.
#
###########################################################################################
def battingPerf3d(file, name="A Hookshot") :
'''
Make a 3D scatter plot of the Runs scored versus the Balls Faced and Minutes at Crease.
Description
Make a 3D plot of the Runs scored by batsman vs Minutes in crease and Balls faced. Fit a linear regression plane
Usage
battingPerf3d(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar<- getPlayerData(35320,file="tendulkar.csv",type="batting",
#homeOrAway=[1,2],result=[1,2,4])
battingPerf3d(pathToFile,"Sachin Tendulkar")
'''
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
batsman = clean(file)
# Make a 3 D plot and fit a regression plane
atitle = name + "- Runs vs BallsFaced & Minutes"
df2=batsman[['BF','Mins','Runs']]
df2['BF']=pd.to_numeric(df2['BF'])
df2['Mins']=pd.to_numeric(df2['Mins'])
df2['Runs']=pd.to_numeric(df2['Runs'])
X=df2.iloc[:,0:2]
Y=df2.iloc[:,2]
    # Fit a regression plane
linreg = LinearRegression().fit(X,Y)
bf= np.linspace(0,400,20)
mins=np.linspace(0,620,20)
xx, yy = np.meshgrid(bf,mins)
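    # Evaluate the fitted plane over a 20x20 grid of (balls faced, minutes) pairs;
    # the predictions are reshaped back to the grid below so plot_surface can draw them.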
xx1=xx.reshape(-1)
yy1=yy.reshape(-1)
test=pd.DataFrame({"BallsFaced": xx1, "Minutes":yy1})
predictedRuns=linreg.predict(test).reshape(20,20)
plt3d = plt.figure().gca(projection='3d')
plt3d.scatter(df2['BF'],df2['Mins'],df2['Runs'])
plt3d.plot_surface(xx.reshape(20,20),yy,predictedRuns, alpha=0.2)
plt3d.set_xlabel('BallsFaced')
plt3d.set_ylabel('Minutes')
plt3d.set_zlabel('Runs');
plt.title(atitle)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerAvgWktsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
# To do - Append number of matches to Ground
###########################################################################################
def bowlerAvgWktsGround(file, name="A Chinaman"):
'''
    This function computes and plots the average wickets taken at different grounds
    Description
    This function computes the average wickets taken by the bowler at different grounds. It also shows the number of innings at each venue
Usage
bowlerAvgWktsGround(file, name = "A Chinaman")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerAvgWktsGround(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
# Aggregate as sum, mean and count
df=bowler[['Wkts','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "-" + "'s Average Wickets at Ground"
plt.xticks(rotation='vertical')
plt.axhline(y=4, color='r', linestyle=':')
plt.title(atitle)
ax=sns.barplot(x='Ground', y="Wkts_mean", data=df1)
#plt.bar(df1['Ground'],df1['Wkts_mean'])
plt.text(15, 4,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerAvgWktsOpposition
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
# To do - Append no of matches in Opposition
###########################################################################################
def bowlerAvgWktsOpposition(file, name="A Chinaman"):
'''
    This function computes and plots the average wickets against different opposition
    Description
    This function computes the average wickets taken by the bowler against different opposition. It also shows the number of innings against each opposition
Usage
bowlerAvgWktsOpposition(file, name = "A Chinaman")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf bowlerAvgWktsGround
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerAvgWktsOpposition(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
# Aggregate as sum, mean and count
df=bowler[['Opposition','Wkts']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "-" + "'s Average Wickets vs Opposition"
plt.xticks(rotation='vertical')
plt.axhline(y=3, color='r', linestyle=':')
ax=sns.barplot(x='Opposition', y="Wkts_mean", data=df1)
plt.title(atitle)
plt.text(2, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerContributionWonLost
# This plots the bowler's contribution to won and lost matches
#
###########################################################################################
def bowlerContributionWonLost(file,name="A Doosra"):
'''
Display the bowler's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the bowler in matches that were won and lost as box plots
Usage
bowlerContributionWonLost(file, name = "A Doosra")
Arguments
file
CSV file of bowler from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage bowlerPerfForecast checkBowlerInForm
Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumbleSp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
bowlerContributionWonLost(pathToFile,"<NAME>")
'''
playersp = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create DFs for won and lost/drawn
won = playersp[playersp['result'] == 1]
lost = playersp[(playersp['result']==2) | (playersp['result']==4)]
won['status']="won"
lost['status']="lost"
# Stack DFs
df= pd.concat([won,lost])
df['Wkts']= pd.to_numeric(df['Wkts'])
ax = sns.boxplot(x='status',y='Wkts',data=df)
atitle = name + "-" + "- Wickets in games won/lost-drawn"
plt.xlabel('Status')
plt.ylabel('Wickets')
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerCumulativeAvgEconRate
# This function computes and plots the cumulative average economy rate of a bowler
#
###########################################################################################
def bowlerCumulativeAvgEconRate(file,name="A Googly"):
'''
Bowler's cumulative average economy rate
Description
This function computes and plots the cumulative average economy rate of a bowler
Usage
bowlerCumulativeAvgEconRate(file,name)
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgWickets batsmanCumulativeStrikeRate
Examples
bowlerCumulativeAvgEconRate(pathToFile,"<NAME>")
'''
bowler=cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
    economyRate = pd.to_numeric(bowler['Econ'])
# Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import datetime
import unittest
from re import escape
from typing import Any, List, Optional, Sequence, Tuple, Union, cast # noqa: F401
import numpy as np
import pandas as pd
import six
from ddt import data, ddt, unpack
from purplequery.binary_expression import BinaryExpression
from purplequery.bq_abstract_syntax_tree import (EMPTY_CONTEXT, EMPTY_NODE, # noqa: F401
AbstractSyntaxTreeNode, EvaluatableNode,
EvaluationContext, Field, GroupedBy, _EmptyNode)
from purplequery.bq_types import (BQArray, BQScalarType, BQStructType, BQType, # noqa: F401
PythonType, TypedDataFrame, TypedSeries)
from purplequery.dataframe_node import QueryExpression, Select, TableReference
from purplequery.evaluatable_node import LiteralType # noqa: F401
from purplequery.evaluatable_node import (Case, Cast, Exists, Extract, FunctionCall, If, InCheck,
Not, NullCheck, Selector, UnaryNegation, Value)
from purplequery.grammar import select as select_rule
from purplequery.grammar import query_expression
from purplequery.query_helper import apply_rule
from purplequery.storage import DatasetTableContext
from purplequery.tokenizer import tokenize
@ddt
class EvaluatableNodeTest(unittest.TestCase):
def setUp(self):
# type: () -> None
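        # Two fixtures are built here: small_table_context holds a single table with one
        # INTEGER column 'a' containing rows 1 and 2; large_table_context holds a table
        # with INTEGER columns a, b, c and rows (1, 2, 3) and (1, 4, 3).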
self.small_table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1], [2]], columns=['a']),
types=[BQScalarType.INTEGER]
)
}
}
})
self.large_table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1, 2, 3], [1, 4, 3]], columns=['a', 'b', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
def test_selector(self):
# type: () -> None
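        # A selector wrapping a plain field should evaluate to that column's values,
        # with the alias becoming the resulting column name.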
selector = Selector(Field(('a',)), 'field_alias')
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = selector.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [1, 2])
self.assertEqual(list(typed_series.dataframe), ['field_alias'])
self.assertEqual(typed_series.types, [BQScalarType.INTEGER])
def test_selector_group_by_success(self):
# type: () -> None
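        # Both rows of the large table have c == 3, so grouping by c collapses them
        # into a single group and the grouped selector yields a single value.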
selector = Selector(Field(('c',)), EMPTY_NODE)
selector.position = 1
context = EvaluationContext(self.large_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
context.exclude_aggregation = True
updated_selector, = context.do_group_by([selector], [Field(('my_table', 'c'))])
typed_series = updated_selector.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [3])
@data((5, BQScalarType.INTEGER),
(1.23, BQScalarType.FLOAT),
("something", BQScalarType.STRING),
(True, BQScalarType.BOOLEAN),
(None, None))
@unpack
def test_value_repr(self, value, type_):
# type: (Optional[LiteralType], Optional[BQScalarType]) -> None
'''Check Value's string representation'''
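        # The repr is expected to embed the reprs of the type and the raw value,
        # e.g. roughly Value(type_=BQScalarType.INTEGER, value=5).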
node = Value(value, type_)
representation = 'Value(type_={}, value={})'.format(type_.__repr__(), value.__repr__())
self.assertEqual(node.__repr__(), representation)
@data((5, None),
(None, BQScalarType.INTEGER))
@unpack
def test_invalid_value(self, value, type_):
# type: (Optional[LiteralType], Optional[BQScalarType]) -> None
'''Check that None is only allowed as both value and type_ or neither.'''
with self.assertRaises(ValueError):
Value(value, type_)
def test_value_eval(self):
# type: () -> None
# A constant is repeated for each row in the context table.
value = Value(12345, BQScalarType.INTEGER)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')), 'foo')
typed_series = value.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [12345, 12345])
def test_field(self):
# type: () -> None
field = Field(('a',))
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = field.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [1, 2])
self.assertEqual(typed_series.series.name, 'a')
@data(
dict(function_name='sum', args=[Field(('a',))], expected_result=[3],
is_aggregating=True),
dict(function_name='max', args=[Field(('a',))], expected_result=[2],
is_aggregating=True),
dict(function_name='min', args=[Field(('a',))], expected_result=[1],
is_aggregating=True),
dict(function_name='concat',
args=[Value('foo', BQScalarType.STRING), Value('bar', BQScalarType.STRING)],
expected_result=['foobar'] * 2), # two copies to match length of context table.
dict(function_name='mod',
args=[Field(('a',)), Value(2, BQScalarType.INTEGER)],
expected_result=[1, 0]),
dict(function_name='mod',
args=[Value(1.0, BQScalarType.FLOAT), Value(2, BQScalarType.INTEGER)],
expected_result=[1.0, 1.0]),
dict(function_name='timestamp',
args=[Value("2019-04-22", BQScalarType.STRING)],
expected_result=[datetime.datetime(2019, 4, 22)] * 2), # two copies to match table len
)
@unpack
def test_functions(self, function_name, args, expected_result, is_aggregating=False):
# type: (str, List[EvaluatableNode], List[PythonType], bool) -> None
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
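        # Aggregate functions (sum/max/min) need a grouped context here, so group the
        # whole table into a single group before evaluating them.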
if is_aggregating:
context.do_group_by((), [])
result = FunctionCall.create(function_name, args, EMPTY_NODE).evaluate(context)
assert isinstance(result, TypedSeries)
self.assertEqual(
[result.type_.convert(elt) for elt in result.series],
expected_result)
def test_current_timestamp(self):
# type: () -> None
node, leftover = apply_rule(query_expression, tokenize(
'select current_timestamp(), a from unnest([struct(1 as a), struct(2), struct(3)])'))
assert isinstance(node, QueryExpression)
self.assertFalse(leftover)
result, _ = node.get_dataframe(DatasetTableContext({}))
table = cast(List[List[datetime.datetime]], result.to_list_of_lists())
self.assertEqual(len(table), 3)
# CURRENT_TIMESTAMP() returns a very recent timestamp
self.assertLess((datetime.datetime.now() - table[0][0]).seconds, 2)
# All rows have the same timestamp value.
self.assertEqual(table[0][0], table[1][0])
self.assertEqual(table[0][0], table[2][0])
@data(
# These expressions are ones whose EvaluatableNode subclass constructs a
# new pandas Series rather than computing on existing ones. See below:
# this runs the risk of constructing it with an incorrect index.
dict(query='select 10, c', expected_result=[[10, 6], [10, 9]]),
dict(query='select [a, b], c', expected_result=[[(4, 5), 6], [(7, 8), 9]]),
dict(query='select (a, b), c', expected_result=[[(4, 5), 6], [(7, 8), 9]]),
dict(query='select exists(select 1), c', expected_result=[[True, 6], [True, 9]]),
dict(query='select a in (1, 4), c', expected_result=[[True, 6], [False, 9]]),
dict(query='select row_number() over (), c', expected_result=[[1, 6], [2, 9]]),
dict(query='select current_timestamp() > timestamp("2019-01-01"), c',
expected_result=[[True, 6], [True, 9]]),
)
@unpack
def test_constructed_column_has_correct_index(self, query, expected_result):
# type: (str, List[List[int]]) -> None
'''Checks that manually constructed columns have the same index as the data.
A manually constructed column will usually have an index 0, 1, 2, ...
(e.g. pd.Series(['a', 'b', 'c']) has index 0, 1, 2).
The data may not; filtering, sorting or other changes might result in an index of
different numbers. If one column's index doesn't match the index of other columns,
it can't be compared or joined with them properly.
'''
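        # A rough pandas-only sketch of the failure mode this guards against
        # (illustrative, not part of the purplequery API):
        #   data = pd.Series([6, 9], index=[1, 2])           # column of a filtered table
        #   const = pd.Series([10, 10])                       # default index [0, 1]
        #   data + const                                      # partial alignment: NaNs appear
        #   data + pd.Series([10, 10], index=data.index)      # aligned: [16, 19]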
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[1, 2, -1], [4, 5, 6], [7, 8, 9]], columns=['a', 'b', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
# Skip the first row of the table, so that the index of the table that
# the test queries operate on is [1, 2]; this makes sure that the index is
# different from the default index you would get for a two-row column,
# which would be [0, 1], to test that expressions are not incorrectly
# using that default index.
node, leftover = select_rule(tokenize(query + ' from (select * from my_table where c > 0)'))
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
self.assertEqual(list(result.dataframe.index), [1, 2])
def test_bad_function(self):
# type: () -> None
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
with self.assertRaisesRegexp(NotImplementedError, 'NOT_A_FUNCTION not implemented'):
FunctionCall.create('not_a_function', [], EMPTY_NODE).evaluate(context)
@data(
# Explore each aggregate function, along with a non-aggregate function to make sure we
# can compute both at once.
dict(selectors='sum(a), b+10', expected_result=[[6, 11], [5, 12]]),
dict(selectors='sum(a), 20+10', expected_result=[[6, 30], [5, 30]]),
dict(selectors='sum(a+1), b+10', expected_result=[[8, 11], [6, 12]]),
dict(selectors='max(a), b+10', expected_result=[[4, 11], [5, 12]]),
dict(selectors='min(a), b+10', expected_result=[[2, 11], [5, 12]]),
dict(selectors='count(a), b+10', expected_result=[[2, 11], [1, 12]]),
dict(selectors='count(*), b+10', expected_result=[[2, 11], [2, 12]]),
dict(selectors='array_agg(a), []', expected_result=[[(2, 4), ()], [(5, None), ()]]),
dict(selectors='array_agg(a), [b]', expected_result=[[(2, 4), (1,)], [(5, None), (2,)]]),
dict(selectors='array_agg(a), [7, 8]', expected_result=[[(2, 4), (7, 8)],
[(5, None), (7, 8)]]),
dict(selectors='array_agg(a), b+10', expected_result=[[(2, 4), 11], [(5, None), 12]]),
)
@unpack
def test_aggregate_functions_in_group_by(self, selectors, expected_result):
# type: (str, List[List[int]]) -> None
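        # The source rows are (a, b) = (2, 1), (4, 1), (5, 2), (NaN, 2); grouping by b
        # therefore aggregates a over {2, 4} for b == 1 and over {5, NULL} for b == 2,
        # which is what the expected results above spell out.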
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[2, 1], [4, 1], [5, 2], [np.nan, 2]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
tokens = tokenize('select {} from my_table group by b'.format(selectors))
node, leftover = select_rule(tokens)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
@data(
dict(query='select sum(a + 1) + 2, count(*) + 3, 4 from my_table',
expected_result=[[11, 6, 4]]),
)
@unpack
def test_aggregate_functions_in_expressions(self, query, expected_result):
# type: (str, List[List[int]]) -> None
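        # With a = [1, 2, 3]: sum(a + 1) is 9, so the first column is 9 + 2 = 11;
        # count(*) is 3, so the second column is 3 + 3 = 6; the literal 4 passes through.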
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[1], [2], [3]], columns=['a']),
types=[BQScalarType.INTEGER])}}})
node, leftover = select_rule(tokenize(query))
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
@data(
# Test all variations of creating a struct (typed, typeless, tuple),
# with and without named fields, with one field, and then with two
# fields.
dict(query='SELECT STRUCT<INTEGER>(1)',
expected_result=(1,),
expected_type=BQStructType([None], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT<a INTEGER>(1)',
expected_result=(1,),
expected_type=BQStructType(['a'], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT(1 AS a)',
expected_result=(1,),
expected_type=BQStructType(['a'], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT(1)',
expected_result=(1,),
expected_type=BQStructType([None], [BQScalarType.INTEGER])),
        # Note: there is no test for a single-element tuple syntax, because that would
        # just be a parenthesized expression; BigQuery has no analogue to Python's
        # trailing comma.
dict(query='SELECT STRUCT<INTEGER, STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<a INTEGER, STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<INTEGER, b STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<a INTEGER, b STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1 AS a, "a" as b)',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1, "a" as b)',
expected_result=(1, 'a'),
expected_type=BQStructType([None, 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1 AS a, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT (1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
)
@unpack
def test_struct_constant_expressions(self, query, expected_result, expected_type):
# type: (str, Tuple[Optional[int], ...], BQStructType) -> None
table_context = DatasetTableContext({})
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
self.assertEqual(result.types, [expected_type])
@data(
# Test all three struct syntaxes, selecting a column as one field, a
# constant as the other.
dict(query='SELECT (a, "a") FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])]),
dict(query='SELECT STRUCT(a as x, "a" as y) FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType(['x', 'y'], [BQScalarType.INTEGER, BQScalarType.STRING])]),
dict(query='SELECT STRUCT<x INTEGER, y STRING>(a, "a") FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType(['x', 'y'], [BQScalarType.INTEGER, BQScalarType.STRING])]),
)
@unpack
def test_struct_field_and_constant(self, query, expected_result, expected_types):
# type: (str, List[List[Tuple[Optional[int], ...]]], Sequence[BQStructType]) -> None
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(self.small_table_context)
self.assertEqual(result.to_list_of_lists(), expected_result)
self.assertEqual(result.types, expected_types)
@data(
# Test combination types of arrays and structs.
dict(query='SELECT ([1], "a")',
expected_result=((1,), 'a'),
expected_type=BQStructType([None, None], [BQArray(BQScalarType.INTEGER),
BQScalarType.STRING])),
dict(query='SELECT STRUCT<x ARRAY<INTEGER>, y STRING>(ARRAY<INTEGER>[1], "a")',
expected_result=((1,), 'a'),
expected_type=BQStructType(['x', 'y'], [BQArray(BQScalarType.INTEGER),
BQScalarType.STRING])),
dict(query='SELECT [(1, "a")]',
expected_result=((1, 'a'), ),
expected_type=BQArray(BQStructType([None, None], [BQScalarType.INTEGER,
BQScalarType.STRING]))),
dict(query='SELECT [STRUCT<a INTEGER, b STRING>(1, "a"), (2, "b")]',
expected_result=((1, 'a'), (2, 'b')),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.INTEGER,
BQScalarType.STRING]))),
# Test that an array of structs merges and coerces the types of the
# structs.
dict(query='SELECT [STRUCT<a FLOAT, STRING>(1.0, "a"), STRUCT<INTEGER, b STRING>(2, "b")]',
expected_result=((1.0, 'a'), (2.0, 'b')),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.FLOAT,
BQScalarType.STRING]))),
dict(query='SELECT [STRUCT<a INTEGER, b ARRAY<STRING> >(1, ["a"]), (2, ["b", "c"])]',
expected_result=((1, ('a',)), (2, ('b', 'c'))),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.INTEGER,
BQArray(BQScalarType.STRING)]))),
)
@unpack
def test_complex_types(self, query, expected_result, expected_type):
# type: (str, Tuple[Optional[int], ...], BQType) -> None
table_context = DatasetTableContext({})
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
self.assertEqual(result.types, [expected_type])
@data(
dict(query='SELECT ARRAY_AGG(a)',
expected_result=(1, 1, 2, None)),
dict(query='SELECT ARRAY_AGG(a RESPECT NULLS)',
expected_result=(1, 1, 2, None)),
dict(query='SELECT ARRAY_AGG(DISTINCT a)',
expected_result=(1, 2, None)),
dict(query='SELECT ARRAY_AGG(DISTINCT a RESPECT NULLS)',
expected_result=(1, 2, None)),
dict(query='SELECT ARRAY_AGG(a IGNORE NULLS)',
expected_result=(1, 1, 2)),
dict(query='SELECT ARRAY_AGG(DISTINCT a IGNORE NULLS)',
expected_result=(1, 2)),
)
@unpack
def test_array_agg_arguments(self, query, expected_result):
# type: (str, Tuple[Optional[int], ...]) -> None
table_context = DatasetTableContext(
{'p': {'d': {'t':
TypedDataFrame(pd.DataFrame([[1], [1], [2], [None]], columns=['a']),
types=[BQScalarType.INTEGER])}}})
node, leftover = select_rule(tokenize(query + ' FROM p.d.t'))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
@data(
dict(query='SELECT [1,2,"a"]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT STRUCT<INT64>(3.7)',
error='Struct field 1 has type .*FLOAT which does not coerce to .*INTEGER'),
dict(query='SELECT ARRAY<INT64>[3.7]',
error='Array specifies type .*INTEGER, incompatible with values of type .*FLOAT'),
dict(query='SELECT ARRAY<INT64>[1,2,"a"]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT ARRAY<string>[1,2]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT [[1]]',
error='Cannot create arrays of arrays'),
dict(query='SELECT [(1, 2), (3, 4, 5)]',
error='Cannot merge .* number of fields varies'),
dict(query='SELECT [STRUCT(1 as a, 2 as b), STRUCT(3 as x, 4 as b)]',
error='Cannot merge Structs; field names .* do not match'),
# same types in different orders can't merge.
dict(query='SELECT [(1, "a"), ("b", 2)]',
error='Cannot implicitly coerce the given types'),
# same names in different orders can't merge
dict(query='SELECT [STRUCT(1 as a, 2 as b), STRUCT(3 as b, 4 as a)]',
error='Cannot merge Structs; field names .* do not match'),
)
@unpack
def test_complex_type_errors(self, query, error):
# type: (str, str) -> None
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
with self.assertRaisesRegexp(ValueError, error):
node.get_dataframe(self.small_table_context)
@data(
# Row number over whole dataset; order is not guaranteed
dict(selectors='row_number() over ()', expected_result=[[1], [2], [3], [4]]),
dict(selectors='row_number() over (order by a), a',
expected_result=[[1, 10], [2, 20], [3, 30], [4, 30]]),
dict(selectors='row_number() over (order by a asc), a',
expected_result=[[1, 10], [2, 20], [3, 30], [4, 30]]),
dict(selectors='row_number() over (order by a desc), a',
expected_result=[[4, 10], [3, 20], [2, 30], [1, 30]]),
dict(selectors='row_number() over (partition by b order by a), a',
expected_result=[[1, 10], [2, 20], [1, 30], [2, 30]]),
dict(selectors='sum(a) over (), a',
expected_result=[[90, 10], [90, 20], [90, 30], [90, 30]]),
dict(selectors='sum(a) over (partition by b), a',
expected_result=[[30, 10], [30, 20], [60, 30], [60, 30]]),
dict(selectors='count(*) over (), a',
expected_result=[[4, 10], [4, 20], [4, 30], [4, 30]]),
dict(selectors='count(a) over (), a',
expected_result=[[4, 10], [4, 20], [4, 30], [4, 30]]),
dict(selectors='count(*) over (partition by b), a',
expected_result=[[2, 10], [2, 20], [2, 30], [2, 30]]),
dict(selectors='count(a) over (partition by b), a',
expected_result=[[2, 10], [2, 20], [2, 30], [2, 30]]),
dict(selectors='sum(count(*)) over ()',
expected_result=[[4]]),
)
@unpack
def test_analytic_function(self, selectors, expected_result):
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[20, 200], [10, 200], [30, 300], [30, 300]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
tokens = tokenize('select {} from my_table'.format(selectors))
node, leftover = select_rule(tokens)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
# Note: BQ docs say if ORDER BY clause (for the select as a whole) is not present, order of
# results is undefined, so we do not assert on the order.
six.assertCountEqual(self, result.to_list_of_lists(), expected_result)
@data(
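        # count(*) per group of b yields 2 and 3; the analytic sum(...) over ()
        # then adds those per-group counts, producing 5 on every output row.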
dict(selectors='sum(count(*)) over (), count(*)',
expected_result=[[5, 2], [5, 3]]),
)
@unpack
def test_analytic_function_with_group_by(self, selectors, expected_result):
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[20, 2], [10, 2], [30, 3], [31, 3], [32, 3]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
tokens = tokenize('select {} from my_table group by b'.format(selectors))
node, leftover = select_rule(tokens)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
# Note: BQ docs say if ORDER BY clause (for the select as a whole) is not present, order of
# results is undefined, so we do not assert on the order.
six.assertCountEqual(self, result.to_list_of_lists(), expected_result)
def test_non_aggregate_function_in_group_by(self):
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([['one', '1'], ['two', '1'], ['three', '2'], ['four', '2']],
columns=['a', 'b']),
types=[BQScalarType.STRING, BQScalarType.INTEGER])}}})
tokens = tokenize('select max(concat(b, "hi")) from my_table group by b')
node, leftover = select_rule(tokens)
self.assertFalse(leftover)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [['1hi'], ['2hi']])
@data(
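        # Column a contains a NULL, so COUNT(a) is 1 while COUNT(*) counts both rows;
        # column c repeats the same value, so COUNT(DISTINCT c) is 1.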
dict(count='COUNT(*)', expected_result=[[2]]),
dict(count='COUNT(c)', expected_result=[[2]]),
dict(count='COUNT(DISTINCT c)', expected_result=[[1]]),
dict(count='COUNT(b)', expected_result=[[2]]),
dict(count='COUNT(DISTINCT b)', expected_result=[[2]]),
dict(count='COUNT(a)', expected_result=[[1]]),
)
@unpack
def test_count(self, count, expected_result):
# type: (str, List[List[int]]) -> None
count_table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1, 2, 3], [None, 4, 3]], columns=['a', 'b', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
select, leftover = select_rule(tokenize('SELECT {} FROM my_table'.format(count)))
self.assertFalse(leftover)
assert isinstance(select, Select)
dataframe, unused_table_name = select.get_dataframe(count_table_context)
self.assertEqual(dataframe.to_list_of_lists(), expected_result)
@data(('IS_NULL', [True, False]), ('IS_NOT_NULL', [False, True]))
@unpack
def test_null_check(self, direction, result):
# type: (str, List[bool]) -> None
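        # Column b holds [None, 3]; IS_NULL should flag only the first row and
        # IS_NOT_NULL only the second, matching the parameterized expectations.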
table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1, None], [2, 3]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
context = EvaluationContext(table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
expression = Field(('b',))
null_check = NullCheck(expression, direction)
typed_series = null_check.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), result)
@data(('IN', [True, False]), ('NOT_IN', [False, True]))
@unpack
def test_in_check(self, direction, result):
# type: (str, List[bool]) -> None
expression = Field(('a',))
elements = (Value(1, type_=BQScalarType.INTEGER), Value(3, type_=BQScalarType.INTEGER))
in_check = InCheck(expression, direction, elements)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = in_check.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), result)
@data(
(True, 0),
(False, 1)
)
@unpack
def test_if_empty_context(self, condition_bool, result):
# type: (bool, int) -> None
condition = Value(condition_bool, BQScalarType.BOOLEAN)
then = Value(0, BQScalarType.INTEGER)
else_ = Value(1, BQScalarType.INTEGER)
# IF [condition] THEN 0 ELSE 1
if_expression = If(condition, then, else_)
typed_series = if_expression.evaluate(EMPTY_CONTEXT)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [result])
def test_if(self):
condition = BinaryExpression(Field(('a',)), '>', Value(1, BQScalarType.INTEGER))
then = Value('yes', BQScalarType.STRING)
else_ = Value('no', BQScalarType.STRING)
# IF a > 1 THEN "yes" ELSE "no"
if_expression = If(condition, then, else_)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = if_expression.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), ['no', 'yes'])
def test_if_different_types(self):
condition = Value(True, BQScalarType.BOOLEAN)
then = Value('yes', BQScalarType.STRING)
else_ = Value(1, BQScalarType.INTEGER)
if_expression = If(condition, then, else_)
error = (r"Cannot implicitly coerce the given types: "
r"\(BQScalarType.STRING, BQScalarType.INTEGER\)")
with self.assertRaisesRegexp(ValueError, error):
if_expression.evaluate(EMPTY_CONTEXT)
def test_if_error(self):
condition = Value(5, BQScalarType.INTEGER)
then = Value(0, BQScalarType.INTEGER)
else_ = Value(1, BQScalarType.INTEGER)
if_expression = If(condition, then, else_)
error = escape("IF condition isn't boolean! Found: {}".format(
str(condition.evaluate(EMPTY_CONTEXT))))
with self.assertRaisesRegexp(ValueError, error):
if_expression.evaluate(EMPTY_CONTEXT)
def test_not(self):
expression = Value(True, BQScalarType.BOOLEAN)
not_expression = Not(expression)
typed_series = not_expression.evaluate(EMPTY_CONTEXT)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [False])
def test_not_type_error(self):
expression = Value(5, BQScalarType.INTEGER)
not_expression = Not(expression)
with self.assertRaisesRegexp(ValueError, ""):
not_expression.evaluate(EMPTY_CONTEXT)
@data(
(1, BQScalarType.INTEGER, -1),
(1.0, BQScalarType.FLOAT, -1.0),
)
@unpack
def test_unary_negation(self, initial_value, value_type, result_value):
# type: (Any, BQScalarType, Any) -> None
expression = Value(initial_value, value_type)
negation = UnaryNegation(expression)
typed_series = negation.evaluate(EMPTY_CONTEXT)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [result_value])
@data(
("abc", BQScalarType.STRING),
(True, BQScalarType.BOOLEAN),
)
@unpack
def test_unary_negation_error(self, value, value_type):
# type: (Any, BQScalarType) -> None
expression = Value(value, value_type)
negation = UnaryNegation(expression)
error = ("UnaryNegation expression supports only integers and floats, got: {}"
.format(value_type))
with self.assertRaisesRegexp(TypeError, error):
negation.evaluate(EMPTY_CONTEXT)
@data(
dict(
comparand=Field(('a',)),
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING)),
(Value(2, BQScalarType.INTEGER), Value("two", BQScalarType.STRING))],
else_=Value("other", BQScalarType.STRING),
result=["one", "two"]
),
dict(
comparand=Field(('a',)),
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING))],
else_=Value("other", BQScalarType.STRING),
result=["one", "other"]
),
dict(
comparand=EMPTY_NODE,
whens=[(Value(True, BQScalarType.BOOLEAN), Value("yes", BQScalarType.STRING)),
(Value(False, BQScalarType.BOOLEAN), Value("no", BQScalarType.STRING))],
else_=EMPTY_NODE,
result=["yes", "yes"]
),
dict(
comparand=Field(('a',)),
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING))],
else_=EMPTY_NODE,
result=["one", None]
),
)
@unpack
def test_case_with_comparand(
self, comparand, # type: Union[_EmptyNode, EvaluatableNode]
whens, # type: List[Tuple[AbstractSyntaxTreeNode, EvaluatableNode]]
else_, # type: EvaluatableNode
result # type: List[str]
):
# type: (...) -> None
case = Case(comparand, whens, else_)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = case.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), result)
def test_case_no_whens(self):
comparand = EMPTY_NODE
whens = []
else_ = EMPTY_NODE
error = "Must provide at least one WHEN for a CASE"
with self.assertRaisesRegexp(ValueError, error):
Case(comparand, whens, else_)
@data(
dict(
comparand=EMPTY_NODE,
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING))],
else_=EMPTY_NODE,
error="CASE condition isn't boolean! Found: {!r}".format(
TypedSeries(pd.Series([1, 1]), BQScalarType.INTEGER))
),
dict(
comparand=Field(('a',)),
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING))],
else_=Value(100, BQScalarType.INTEGER),
error="Cannot implicitly coerce the given types: "
"(BQScalarType.STRING, BQScalarType.INTEGER)"
),
)
@unpack
def test_case_error(self, comparand, # type: Union[_EmptyNode, EvaluatableNode]
whens, # type: List[Tuple[AbstractSyntaxTreeNode, EvaluatableNode]]
else_, # type: EvaluatableNode
error # type: str
):
# type: (...) -> None
case = Case(comparand, whens, else_)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
with self.assertRaisesRegexp(ValueError, escape(error)):
case.evaluate(context)
@data(
dict(
value=Value(1, BQScalarType.INTEGER),
cast_type='STRING',
result='1'
),
dict(
value=Value(1, BQScalarType.INTEGER),
cast_type='FLOAT',
result=1.0
),
dict(
value=Value(1, BQScalarType.INTEGER),
cast_type='BOOLEAN',
result=True
),
dict(
value=Value(1.0, BQScalarType.FLOAT),
cast_type='STRING',
result='1.0'
),
dict(
value=Value(1.0, BQScalarType.FLOAT),
cast_type='INTEGER',
result=1
),
dict(
value=Value(True, BQScalarType.BOOLEAN),
cast_type='STRING',
result='True'
),
dict(
value=Value(True, BQScalarType.BOOLEAN),
cast_type='INTEGER',
result=1
),
dict(
value=Value('1', BQScalarType.STRING),
cast_type='INTEGER',
result=1
),
dict(
value=Value('1.0', BQScalarType.STRING),
cast_type='FLOAT',
result=1.0
),
dict(
value=Value('TRUE', BQScalarType.STRING),
cast_type='BOOLEAN',
result=True
),
dict(
value=Value('2019-12-01', BQScalarType.STRING),
cast_type='DATETIME',
result=datetime.datetime(2019, 12, 1),
),
dict(
value=Value('2019-12-01', BQScalarType.STRING),
cast_type='DATE',
result=datetime.date(2019, 12, 1),
),
dict(
value=Value('2019-12-01 01:02:03', BQScalarType.STRING),
cast_type='TIMESTAMP',
result=datetime.datetime(2019, 12, 1, 1, 2, 3),
),
dict(
value=Value(pd.Timestamp('2019-12-01'), BQScalarType.DATE),
cast_type='DATETIME',
result=datetime.datetime(2019, 12, 1),
),
dict(
value=Value(pd.Timestamp('2019-12-01'), BQScalarType.DATE),
cast_type='TIMESTAMP',
result=datetime.datetime(2019, 12, 1),
),
dict(
value=Value(pd.Timestamp('2019-12-01 00:01:02'), BQScalarType.DATETIME),
cast_type='DATE',
result=datetime.date(2019, 12, 1),
),
dict(
value=Value(pd.Timestamp('2019-12-01 00:01:02'), BQScalarType.DATETIME),
cast_type='TIMESTAMP',
result=datetime.datetime(2019, 12, 1, 0, 1, 2),
),
)
@unpack
def test_cast(self, value, cast_type, result):
# type: (Value, str, pd.Timestamp) -> None
cast = Cast(value, cast_type)
series = cast.evaluate(EMPTY_CONTEXT)
assert isinstance(series, TypedSeries)
self.assertEqual(series.to_list(), [result])
@data(
dict(
value=Value("abc", BQScalarType.STRING),
cast_type=BQScalarType.INTEGER,
# TODO: This error message should be about converting to
# int, not float. But bq_types currently defines
# BQScalarType.INTEGER converting to float64.
#
# Python 3 surrounds the expression with quotes, Python 2 doesn't, so the
# regex .? matches the Py3-only quote.
error="could not convert string to float: .?abc"
),
dict(
value=Value("abc", BQScalarType.STRING),
cast_type=BQScalarType.FLOAT,
# Python 3 surrounds the expression with quotes, Python 2 doesn't, so the
# regex .? matches the Py3-only quote.
error="could not convert string to float: .?abc"
),
dict(
value=Value("abc", BQScalarType.STRING),
cast_type=BQScalarType.TIMESTAMP,
error="abc",
),
)
@unpack
def test_cast_error(self, value, cast_type, error):
# type: (Value, BQScalarType, str) -> None
cast = Cast(value, cast_type.value)
with self.assertRaisesRegexp(ValueError, error):
cast.evaluate(EMPTY_CONTEXT)
@data(
("select a from `my_project.my_dataset.my_table` where a=1", [True, True]),
("select a from `my_project.my_dataset.my_table` where a=10", [False, False]),
)
@unpack
def test_exists(self, select_query, result):
# type: (str, List[bool]) -> None
subquery_node, leftover = apply_rule(query_expression, tokenize(select_query))
assert isinstance(subquery_node, QueryExpression)
self.assertFalse(leftover)
exists = Exists(subquery_node)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = exists.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), result)
def test_exists_reference_outer(self):
table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1], [4]], columns=['a']),
types=[BQScalarType.INTEGER]
),
'my_table2': TypedDataFrame(
| pd.DataFrame([[4], [2]], columns=['b']) | pandas.DataFrame |
# Author: <NAME>
#
# License: BSD 3 clause
import logging
import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import strip_tags
import umap
import hdbscan
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn.cluster import dbscan
import tempfile
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
from scipy.special import softmax
try:
import hnswlib
_HAVE_HNSWLIB = True
except ImportError:
_HAVE_HNSWLIB = False
try:
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text
_HAVE_TENSORFLOW = True
except ImportError:
_HAVE_TENSORFLOW = False
try:
from sentence_transformers import SentenceTransformer
_HAVE_TORCH = True
except ImportError:
_HAVE_TORCH = False
logger = logging.getLogger('top2vec')
logger.setLevel(logging.WARNING)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(sh)
use_models = ["universal-sentence-encoder-multilingual",
"universal-sentence-encoder",
"universal-sentence-encoder-large",
"universal-sentence-encoder-multilingual-large"]
use_model_urls = {
"universal-sentence-encoder-multilingual": "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3",
"universal-sentence-encoder": "https://tfhub.dev/google/universal-sentence-encoder/4",
"universal-sentence-encoder-large": "https://tfhub.dev/google/universal-sentence-encoder-large/5",
"universal-sentence-encoder-multilingual-large": "https://tfhub.dev/google/universal-sentence-encoder"
"-multilingual-large/3"
}
sbert_models = ["distiluse-base-multilingual-cased",
"all-MiniLM-L6-v2",
"paraphrase-multilingual-MiniLM-L12-v2"]
acceptable_embedding_models = use_models + sbert_models
def default_tokenizer(document):
"""Tokenize a document for training and remove too long/short words
Parameters
----------
document: List of str
Input document.
Returns
-------
tokenized_document: List of str
List of tokens.
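    Examples
    --------
    Illustrative only: HTML tags are stripped, and gensim's simple_preprocess
    lowercases, de-accents and drops very short or numeric tokens.
    >>> default_tokenizer("<p>Héllo World 42</p>")
    ['hello', 'world']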
"""
return simple_preprocess(strip_tags(document), deacc=True)
def get_chunks(tokens, chunk_length, max_num_chunks, chunk_overlap_ratio):
"""Split a document into sequential chunks
Parameters
----------
tokens: List of str
Input document tokens.
chunk_length: int
Length of each document chunk.
max_num_chunks: int (Optional, default None)
Limit the number of document chunks
chunk_overlap_ratio: float
Fraction of overlapping tokens between sequential chunks.
Returns
-------
chunked_document: List of str
List of document chunks.
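    Examples
    --------
    Illustrative values: 2-token chunks with 50% overlap over 5 tokens.
    >>> get_chunks(["a", "b", "c", "d", "e"], chunk_length=2,
    ...            max_num_chunks=None, chunk_overlap_ratio=0.5)
    ['a b', 'b c', 'c d']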
"""
num_tokens = len(tokens)
if num_tokens == 0:
return [""]
num_chunks = int(np.ceil(num_tokens / chunk_length))
if max_num_chunks is not None:
num_chunks = min(num_chunks, max_num_chunks)
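    # Chunk start positions advance by chunk_length * (1 - chunk_overlap_ratio)
    # tokens, so a ratio of 0.5 makes each chunk share half of its tokens with
    # the previous one.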
return [" ".join(tokens[i:i + chunk_length])
for i in list(range(0, num_tokens, int(chunk_length * (1 - chunk_overlap_ratio))))[0:num_chunks]]
def get_random_chunks(tokens, chunk_length, chunk_len_coverage_ratio, max_num_chunks):
"""Split a document into chunks starting at random positions
Parameters
----------
tokens: List of str
Input document tokens.
chunk_length: int
Length of each document chunk.
chunk_len_coverage_ratio: float
Proportion of token length that will be covered by chunks. Default
        value of 1.0 means chunk lengths will add up to the number of tokens.
This does not mean all tokens will be covered.
max_num_chunks: int (Optional, default None)
Limit the number of document chunks
Returns
-------
chunked_document: List of str
List of document chunks.
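    Examples
    --------
    Chunk start positions are sampled randomly, so the output varies between
    calls; the result shown is just one possibility.
    >>> get_random_chunks(["a", "b", "c", "d"], chunk_length=2,
    ...                   chunk_len_coverage_ratio=1.0, max_num_chunks=None)  # doctest: +SKIP
    ['c d', 'a b']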
"""
num_tokens = len(tokens)
if num_tokens == 0:
return [""]
num_chunks = int(np.ceil(num_tokens * chunk_len_coverage_ratio / chunk_length))
if max_num_chunks is not None:
num_chunks = min(num_chunks, max_num_chunks)
starts = np.random.choice(range(0, num_tokens), size=num_chunks)
return [" ".join(tokens[i:i + chunk_length]) for i in starts]
class Top2Vec:
"""
Top2Vec
Creates jointly embedded topic, document and word vectors.
Parameters
----------
documents: List of str
Input corpus, should be a list of strings.
min_count: int (Optional, default 50)
Ignores all words with total frequency lower than this. For smaller
corpora a smaller min_count will be necessary.
embedding_model: string or callable
This will determine which model is used to generate the document and
word embeddings. The valid string options are:
* doc2vec
* universal-sentence-encoder
* universal-sentence-encoder-large
* universal-sentence-encoder-multilingual
* universal-sentence-encoder-multilingual-large
* distiluse-base-multilingual-cased
* all-MiniLM-L6-v2
* paraphrase-multilingual-MiniLM-L12-v2
        For large data sets and data sets with very specialized vocabulary, doc2vec
could produce better results. This will train a doc2vec model from
scratch. This method is language agnostic. However multiple languages
will not be aligned.
Using the universal sentence encoder options will be much faster since
those are pre-trained and efficient models. The universal sentence
encoder options are suggested for smaller data sets. They are also
good options for large data sets that are in English or in languages
covered by the multilingual model. It is also suggested for data sets
that are multilingual.
For more information on universal-sentence-encoder options visit:
https://tfhub.dev/google/collections/universal-sentence-encoder/1
The SBERT pre-trained sentence transformer options are
distiluse-base-multilingual-cased,
paraphrase-multilingual-MiniLM-L12-v2, and all-MiniLM-L6-v2.
The distiluse-base-multilingual-cased and
paraphrase-multilingual-MiniLM-L12-v2 are suggested for multilingual
datasets and languages that are not
covered by the multilingual universal sentence encoder. The
transformer is significantly slower than the universal sentence
        encoder options (except for the large options).
For more information on SBERT options visit:
https://www.sbert.net/docs/pretrained_models.html
If passing a callable embedding_model note that it will not be saved
when saving a top2vec model. After loading such a saved top2vec model
the set_embedding_model method will need to be called and the same
embedding_model callable used during training must be passed to it.
embedding_model_path: string (Optional)
Pre-trained embedding models will be downloaded automatically by
        default. However, they can also be loaded from a file at the
        location given by embedding_model_path.
Warning: the model at embedding_model_path must match the
embedding_model parameter type.
embedding_batch_size: int (default=32)
Batch size for documents being embedded.
split_documents: bool (default False)
If set to True, documents will be split into parts before embedding.
After embedding the multiple document part embeddings will be averaged
to create a single embedding per document. This is useful when documents
are very large or when the embedding model has a token limit.
        Document chunking or a sentencizer can be used for document splitting.
document_chunker: string or callable (default 'sequential')
This will break the document into chunks. The valid string options are:
* sequential
* random
The sequential chunker will split the document into chunks of specified
length and ratio of overlap. This is the recommended method.
The random chunking option will take random chunks of specified length
from the document. These can overlap and should be thought of as
sampling chunks with replacement from the document.
If a callable is passed it must take as input a list of tokens of
a document and return a list of strings representing the resulting
document chunks.
        Only one of document_chunker or sentencizer should be used.
chunk_length: int (default 100)
The number of tokens per document chunk if using the document chunker
string options.
max_num_chunks: int (Optional)
The maximum number of chunks generated per document if using the
document chunker string options.
chunk_overlap_ratio: float (default 0.5)
Only applies to the 'sequential' document chunker.
Fraction of overlapping tokens between sequential chunks. A value of
        0 will result in no overlap, whereas 0.5 will overlap half of the
        previous chunk.
chunk_len_coverage_ratio: float (default 1.0)
Only applies to the 'random' document chunker option.
Proportion of token length that will be covered by chunks. Default
        value of 1.0 means chunk lengths will add up to the number of tokens of
the document. This does not mean all tokens will be covered since
chunks can be overlapping.
sentencizer: callable (Optional)
        A sentencizer callable can be passed. The input should be a string
representing the document and the output should be a list of strings
representing the document sentence chunks.
        Only one of document_chunker or sentencizer should be used.
speed: string (Optional, default 'learn')
This parameter is only used when using doc2vec as embedding_model.
It will determine how fast the model takes to train. The
fast-learn option is the fastest and will generate the lowest quality
vectors. The learn option will learn better quality vectors but take
a longer time to train. The deep-learn option will learn the best
quality vectors but will take significant time to train. The valid
string speed options are:
* fast-learn
* learn
* deep-learn
use_corpus_file: bool (Optional, default False)
This parameter is only used when using doc2vec as embedding_model.
Setting use_corpus_file to True can sometimes provide speedup for
large datasets when multiple worker threads are available. Documents
are still passed to the model as a list of str, the model will create
a temporary corpus file for training.
document_ids: List of str, int (Optional)
A unique value per document that will be used for referring to
documents in search results. If ids are not given to the model, the
index of each document in the original corpus will become the id.
keep_documents: bool (Optional, default True)
If set to False documents will only be used for training and not saved
as part of the model. This will reduce model size. When using search
functions only document ids will be returned, not the actual
documents.
workers: int (Optional)
        The number of worker threads to be used in training the model. A
        larger number of workers will lead to faster training.
tokenizer: callable (Optional, default None)
Override the default tokenization method. If None then
gensim.utils.simple_preprocess will be used.
Tokenizer must take a document and return a list of tokens.
use_embedding_model_tokenizer: bool (Optional, default False)
        If using an embedding model other than doc2vec and this is set to
        True, the embedding model's own tokenizer is used when embedding the
        documents. The default or passed tokenizer callable is still used to
        tokenize the text and extract the vocabulary for word embedding.
umap_args: dict (Optional, default None)
Pass custom arguments to UMAP.
hdbscan_args: dict (Optional, default None)
Pass custom arguments to HDBSCAN.
verbose: bool (Optional, default True)
Whether to print status data during training.
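    Example
    -------
    A minimal usage sketch; ``docs`` stands for any sufficiently large list of
    document strings and is not part of the library::

        model = Top2Vec(documents=docs, embedding_model='doc2vec', workers=4)
        model.topic_sizes      # documents per topic, largest topic first
        model.topic_words[:1]  # the 50 words closest to the largest topic's vector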
"""
def __init__(self,
documents,
min_count=50,
embedding_model='doc2vec',
embedding_model_path=None,
embedding_batch_size=32,
split_documents=False,
document_chunker='sequential',
chunk_length=100,
max_num_chunks=None,
chunk_overlap_ratio=0.5,
chunk_len_coverage_ratio=1.0,
sentencizer=None,
speed='learn',
use_corpus_file=False,
document_ids=None,
keep_documents=True,
workers=None,
tokenizer=None,
use_embedding_model_tokenizer=False,
umap_args=None,
hdbscan_args=None,
verbose=True
):
if verbose:
logger.setLevel(logging.DEBUG)
self.verbose = True
else:
logger.setLevel(logging.WARNING)
self.verbose = False
if tokenizer is None:
tokenizer = default_tokenizer
# validate documents
if not (isinstance(documents, list) or isinstance(documents, np.ndarray)):
raise ValueError("Documents need to be a list of strings")
if not all((isinstance(doc, str) or isinstance(doc, np.str_)) for doc in documents):
raise ValueError("Documents need to be a list of strings")
if keep_documents:
self.documents = np.array(documents, dtype="object")
else:
self.documents = None
# validate document ids
if document_ids is not None:
if not (isinstance(document_ids, list) or isinstance(document_ids, np.ndarray)):
raise ValueError("Documents ids need to be a list of str or int")
if len(documents) != len(document_ids):
raise ValueError("Document ids need to match number of documents")
elif len(document_ids) != len(set(document_ids)):
raise ValueError("Document ids need to be unique")
if all((isinstance(doc_id, str) or isinstance(doc_id, np.str_)) for doc_id in document_ids):
self.doc_id_type = np.str_
elif all((isinstance(doc_id, int) or isinstance(doc_id, np.int_)) for doc_id in document_ids):
self.doc_id_type = np.int_
else:
raise ValueError("Document ids need to be str or int")
self.document_ids_provided = True
self.document_ids = np.array(document_ids)
self.doc_id2index = dict(zip(document_ids, list(range(0, len(document_ids)))))
else:
self.document_ids_provided = False
self.document_ids = np.array(range(0, len(documents)))
self.doc_id2index = dict(zip(self.document_ids, list(range(0, len(self.document_ids)))))
self.doc_id_type = np.int_
self.embedding_model_path = embedding_model_path
# validate document splitting
use_sentencizer = False
custom_chunker = False
if split_documents:
if document_chunker == 'sequential':
document_chunker = get_chunks
document_chunker_args = {"chunk_length": chunk_length,
"max_num_chunks": max_num_chunks,
"chunk_overlap_ratio": chunk_overlap_ratio}
elif document_chunker == 'random':
document_chunker = get_random_chunks
document_chunker_args = {"chunk_length": chunk_length,
"max_num_chunks": max_num_chunks,
"chunk_len_coverage_ratio": chunk_len_coverage_ratio}
elif callable(document_chunker):
custom_chunker = True
elif sentencizer is None:
raise ValueError(f"{document_chunker} is an invalid document chunker.")
elif callable(sentencizer):
use_sentencizer = True
else:
raise ValueError(f"{sentencizer} is invalid. Document sentencizer must be callable.")
if embedding_model == 'doc2vec':
# validate training inputs
if speed == "fast-learn":
hs = 0
negative = 5
epochs = 40
elif speed == "learn":
hs = 1
negative = 0
epochs = 40
elif speed == "deep-learn":
hs = 1
negative = 0
epochs = 400
elif speed == "test-learn":
hs = 0
negative = 5
epochs = 1
else:
raise ValueError("speed parameter needs to be one of: fast-learn, learn or deep-learn")
            if workers is not None and not isinstance(workers, int):
                raise ValueError("workers needs to be an int")
doc2vec_args = {"vector_size": 300,
"min_count": min_count,
"window": 15,
"sample": 1e-5,
"negative": negative,
"hs": hs,
"epochs": epochs,
"dm": 0,
"dbow_words": 1}
if workers is not None:
doc2vec_args["workers"] = workers
logger.info('Pre-processing documents for training')
if use_corpus_file:
processed = [' '.join(tokenizer(doc)) for doc in documents]
lines = "\n".join(processed)
temp = tempfile.NamedTemporaryFile(mode='w+t')
                temp.write(lines)
                temp.flush()  # flush the buffer so Doc2Vec can read the full corpus from disk
                doc2vec_args["corpus_file"] = temp.name
else:
train_corpus = [TaggedDocument(tokenizer(doc), [i]) for i, doc in enumerate(documents)]
doc2vec_args["documents"] = train_corpus
logger.info('Creating joint document/word embedding')
self.embedding_model = 'doc2vec'
self.model = Doc2Vec(**doc2vec_args)
self.word_vectors = self.model.wv.get_normed_vectors()
self.word_indexes = self.model.wv.key_to_index
self.vocab = list(self.model.wv.key_to_index.keys())
self.document_vectors = self.model.dv.get_normed_vectors()
if use_corpus_file:
temp.close()
elif (embedding_model in acceptable_embedding_models) or callable(embedding_model):
self.embed = None
self.embedding_model = embedding_model
self._check_import_status()
logger.info('Pre-processing documents for training')
# preprocess documents
tokenized_corpus = [tokenizer(doc) for doc in documents]
def return_doc(doc):
return doc
# preprocess vocabulary
vectorizer = CountVectorizer(tokenizer=return_doc, preprocessor=return_doc)
doc_word_counts = vectorizer.fit_transform(tokenized_corpus)
words = vectorizer.get_feature_names()
word_counts = np.array(np.sum(doc_word_counts, axis=0).tolist()[0])
vocab_inds = np.where(word_counts > min_count)[0]
if len(vocab_inds) == 0:
raise ValueError(f"A min_count of {min_count} results in "
f"all words being ignored, choose a lower value.")
self.vocab = [words[ind] for ind in vocab_inds]
self._check_model_status()
logger.info('Creating joint document/word embedding')
# embed words
self.word_indexes = dict(zip(self.vocab, range(len(self.vocab))))
self.word_vectors = self._l2_normalize(np.array(self.embed(self.vocab)))
# embed documents
# split documents
if split_documents:
if use_sentencizer:
chunk_id = 0
chunked_docs = []
chunked_doc_ids = []
for doc in documents:
doc_chunks = sentencizer(doc)
doc_chunk_ids = [chunk_id] * len(doc_chunks)
chunk_id += 1
chunked_docs.extend(doc_chunks)
chunked_doc_ids.extend(doc_chunk_ids)
else:
chunk_id = 0
chunked_docs = []
chunked_doc_ids = []
for tokens in tokenized_corpus:
if custom_chunker:
doc_chunks = document_chunker(tokens)
else:
doc_chunks = document_chunker(tokens, **document_chunker_args)
doc_chunk_ids = [chunk_id] * len(doc_chunks)
chunk_id += 1
chunked_docs.extend(doc_chunks)
chunked_doc_ids.extend(doc_chunk_ids)
chunked_doc_ids = np.array(chunked_doc_ids)
document_chunk_vectors = self._embed_documents(chunked_docs, embedding_batch_size)
self.document_vectors = self._l2_normalize(
np.vstack([document_chunk_vectors[np.where(chunked_doc_ids == label)[0]]
.mean(axis=0) for label in set(chunked_doc_ids)]))
# original documents
else:
if use_embedding_model_tokenizer:
self.document_vectors = self._embed_documents(documents, embedding_batch_size)
else:
train_corpus = [' '.join(tokens) for tokens in tokenized_corpus]
self.document_vectors = self._embed_documents(train_corpus, embedding_batch_size)
else:
raise ValueError(f"{embedding_model} is an invalid embedding model.")
# create 5D embeddings of documents
logger.info('Creating lower dimension embedding of documents')
if umap_args is None:
umap_args = {'n_neighbors': 15,
'n_components': 5,
'metric': 'cosine'}
umap_model = umap.UMAP(**umap_args).fit(self.document_vectors)
# find dense areas of document vectors
logger.info('Finding dense areas of documents')
if hdbscan_args is None:
hdbscan_args = {'min_cluster_size': 15,
'metric': 'euclidean',
'cluster_selection_method': 'eom'}
cluster = hdbscan.HDBSCAN(**hdbscan_args).fit(umap_model.embedding_)
# calculate topic vectors from dense areas of documents
logger.info('Finding topics')
# create topic vectors
self._create_topic_vectors(cluster.labels_)
# deduplicate topics
self._deduplicate_topics()
# find topic words and scores
self.topic_words, self.topic_word_scores = self._find_topic_words_and_scores(topic_vectors=self.topic_vectors)
# assign documents to topic
self.doc_top, self.doc_dist = self._calculate_documents_topic(self.topic_vectors,
self.document_vectors)
# calculate topic sizes
self.topic_sizes = self._calculate_topic_sizes(hierarchy=False)
# re-order topics
self._reorder_topics(hierarchy=False)
# initialize variables for hierarchical topic reduction
self.topic_vectors_reduced = None
self.doc_top_reduced = None
self.doc_dist_reduced = None
self.topic_sizes_reduced = None
self.topic_words_reduced = None
self.topic_word_scores_reduced = None
self.hierarchy = None
# initialize document indexing variables
self.document_index = None
self.serialized_document_index = None
self.documents_indexed = False
self.index_id2doc_id = None
self.doc_id2index_id = None
# initialize word indexing variables
self.word_index = None
self.serialized_word_index = None
self.words_indexed = False
def save(self, file):
"""
Saves the current model to the specified file.
Parameters
----------
file: str
File where model will be saved.
"""
document_index_temp = None
word_index_temp = None
# do not save sentence encoders, sentence transformers and custom embedding
if self.embedding_model not in ["doc2vec"]:
self.embed = None
# serialize document index so that it can be saved
if self.documents_indexed:
temp = tempfile.NamedTemporaryFile(mode='w+b')
self.document_index.save_index(temp.name)
self.serialized_document_index = temp.read()
temp.close()
document_index_temp = self.document_index
self.document_index = None
# serialize word index so that it can be saved
if self.words_indexed:
temp = tempfile.NamedTemporaryFile(mode='w+b')
self.word_index.save_index(temp.name)
self.serialized_word_index = temp.read()
temp.close()
word_index_temp = self.word_index
self.word_index = None
dump(self, file)
self.document_index = document_index_temp
self.word_index = word_index_temp
@classmethod
def load(cls, file):
"""
Load a pre-trained model from the specified file.
Parameters
----------
file: str
File where model will be loaded from.
"""
top2vec_model = load(file)
# load document index
if top2vec_model.documents_indexed:
if not _HAVE_HNSWLIB:
raise ImportError(f"Cannot load document index.\n\n"
"Try: pip install top2vec[indexing]\n\n"
"Alternatively try: pip install hnswlib")
temp = tempfile.NamedTemporaryFile(mode='w+b')
temp.write(top2vec_model.serialized_document_index)
document_vectors = top2vec_model.document_vectors
top2vec_model.document_index = hnswlib.Index(space='ip',
dim=document_vectors.shape[1])
top2vec_model.document_index.load_index(temp.name, max_elements=document_vectors.shape[0])
temp.close()
top2vec_model.serialized_document_index = None
# load word index
if top2vec_model.words_indexed:
if not _HAVE_HNSWLIB:
raise ImportError(f"Cannot load word index.\n\n"
"Try: pip install top2vec[indexing]\n\n"
"Alternatively try: pip install hnswlib")
temp = tempfile.NamedTemporaryFile(mode='w+b')
temp.write(top2vec_model.serialized_word_index)
word_vectors = top2vec_model.word_vectors
top2vec_model.word_index = hnswlib.Index(space='ip',
dim=word_vectors.shape[1])
top2vec_model.word_index.load_index(temp.name, max_elements=word_vectors.shape[0])
temp.close()
top2vec_model.serialized_word_index = None
return top2vec_model
@staticmethod
def _l2_normalize(vectors):
if vectors.ndim == 2:
return normalize(vectors)
else:
return normalize(vectors.reshape(1, -1))[0]
def _embed_documents(self, train_corpus, batch_size):
self._check_import_status()
self._check_model_status()
# embed documents
document_vectors = []
if (self.embedding_model in use_models) or self.embedding_model == "custom":
current = 0
batches = int(len(train_corpus) / batch_size)
extra = len(train_corpus) % batch_size
for ind in range(0, batches):
document_vectors.append(self.embed(train_corpus[current:current + batch_size]))
current += batch_size
if extra > 0:
document_vectors.append(self.embed(train_corpus[current:current + extra]))
document_vectors = self._l2_normalize(np.array(np.vstack(document_vectors)))
else:
document_vectors = self.embed(train_corpus, batch_size=batch_size)
return document_vectors
def _embed_query(self, query):
self._check_import_status()
self._check_model_status()
return self._l2_normalize(np.array(self.embed([query])[0]))
def _create_topic_vectors(self, cluster_labels):
unique_labels = set(cluster_labels)
if -1 in unique_labels:
unique_labels.remove(-1)
self.topic_vectors = self._l2_normalize(
np.vstack([self.document_vectors[np.where(cluster_labels == label)[0]]
.mean(axis=0) for label in unique_labels]))
def _deduplicate_topics(self):
core_samples, labels = dbscan(X=self.topic_vectors,
eps=0.1,
min_samples=2,
metric="cosine")
duplicate_clusters = set(labels)
if len(duplicate_clusters) > 1 or -1 not in duplicate_clusters:
# unique topics
unique_topics = self.topic_vectors[np.where(labels == -1)[0]]
if -1 in duplicate_clusters:
duplicate_clusters.remove(-1)
# merge duplicate topics
for unique_label in duplicate_clusters:
unique_topics = np.vstack(
[unique_topics, self._l2_normalize(self.topic_vectors[np.where(labels == unique_label)[0]]
.mean(axis=0))])
self.topic_vectors = unique_topics
def _calculate_topic_sizes(self, hierarchy=False):
if hierarchy:
topic_sizes = pd.Series(self.doc_top_reduced).value_counts()
else:
topic_sizes = pd.Series(self.doc_top).value_counts()
return topic_sizes
def _reorder_topics(self, hierarchy=False):
if hierarchy:
self.topic_vectors_reduced = self.topic_vectors_reduced[self.topic_sizes_reduced.index]
self.topic_words_reduced = self.topic_words_reduced[self.topic_sizes_reduced.index]
self.topic_word_scores_reduced = self.topic_word_scores_reduced[self.topic_sizes_reduced.index]
old2new = dict(zip(self.topic_sizes_reduced.index, range(self.topic_sizes_reduced.index.shape[0])))
self.doc_top_reduced = np.array([old2new[i] for i in self.doc_top_reduced])
self.hierarchy = [self.hierarchy[i] for i in self.topic_sizes_reduced.index]
self.topic_sizes_reduced.reset_index(drop=True, inplace=True)
else:
self.topic_vectors = self.topic_vectors[self.topic_sizes.index]
self.topic_words = self.topic_words[self.topic_sizes.index]
self.topic_word_scores = self.topic_word_scores[self.topic_sizes.index]
old2new = dict(zip(self.topic_sizes.index, range(self.topic_sizes.index.shape[0])))
self.doc_top = np.array([old2new[i] for i in self.doc_top])
self.topic_sizes.reset_index(drop=True, inplace=True)
@staticmethod
def _calculate_documents_topic(topic_vectors, document_vectors, dist=True, num_topics=None):
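        # Documents are scored against topic vectors in batches of 10,000 so the
        # intermediate inner-product matrix stays small enough to hold in memory.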
batch_size = 10000
doc_top = []
if dist:
doc_dist = []
if document_vectors.shape[0] > batch_size:
current = 0
batches = int(document_vectors.shape[0] / batch_size)
extra = document_vectors.shape[0] % batch_size
for ind in range(0, batches):
res = np.inner(document_vectors[current:current + batch_size], topic_vectors)
if num_topics is None:
doc_top.extend(np.argmax(res, axis=1))
if dist:
doc_dist.extend(np.max(res, axis=1))
else:
doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
if dist:
doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])
current += batch_size
if extra > 0:
res = np.inner(document_vectors[current:current + extra], topic_vectors)
if num_topics is None:
doc_top.extend(np.argmax(res, axis=1))
if dist:
doc_dist.extend(np.max(res, axis=1))
else:
doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
if dist:
doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])
if dist:
doc_dist = np.array(doc_dist)
else:
res = np.inner(document_vectors, topic_vectors)
if num_topics is None:
doc_top = np.argmax(res, axis=1)
if dist:
doc_dist = np.max(res, axis=1)
else:
doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
if dist:
doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])
if num_topics is not None:
doc_top = np.array(doc_top)
if dist:
doc_dist = np.array(doc_dist)
if dist:
return doc_top, doc_dist
else:
return doc_top
def _find_topic_words_and_scores(self, topic_vectors):
topic_words = []
topic_word_scores = []
res = np.inner(topic_vectors, self.word_vectors)
top_words = np.flip(np.argsort(res, axis=1), axis=1)
top_scores = np.flip(np.sort(res, axis=1), axis=1)
for words, scores in zip(top_words, top_scores):
topic_words.append([self.vocab[i] for i in words[0:50]])
topic_word_scores.append(scores[0:50])
topic_words = np.array(topic_words)
topic_word_scores = np.array(topic_word_scores)
return topic_words, topic_word_scores
def _assign_documents_to_topic(self, document_vectors, hierarchy=False):
if hierarchy:
doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors_reduced,
document_vectors,
dist=True)
self.doc_top_reduced = np.array(list(self.doc_top_reduced) + list(doc_top_new))
self.doc_dist_reduced = np.array(list(self.doc_dist_reduced) + list(doc_dist_new))
topic_sizes_new = pd.Series(doc_top_new).value_counts()
for top in topic_sizes_new.index.tolist():
self.topic_sizes_reduced[top] += topic_sizes_new[top]
self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
else:
doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors, document_vectors, dist=True)
self.doc_top = np.array(list(self.doc_top) + list(doc_top_new))
self.doc_dist = np.array(list(self.doc_dist) + list(doc_dist_new))
topic_sizes_new = pd.Series(doc_top_new).value_counts()
for top in topic_sizes_new.index.tolist():
self.topic_sizes[top] += topic_sizes_new[top]
self.topic_sizes.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
def _unassign_documents_from_topic(self, doc_indexes, hierarchy=False):
if hierarchy:
doc_top_remove = self.doc_top_reduced[doc_indexes]
self.doc_top_reduced = np.delete(self.doc_top_reduced, doc_indexes, 0)
self.doc_dist_reduced = np.delete(self.doc_dist_reduced, doc_indexes, 0)
topic_sizes_remove = pd.Series(doc_top_remove).value_counts()
for top in topic_sizes_remove.index.tolist():
self.topic_sizes_reduced[top] -= topic_sizes_remove[top]
self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
else:
doc_top_remove = self.doc_top[doc_indexes]
self.doc_top = np.delete(self.doc_top, doc_indexes, 0)
self.doc_dist = np.delete(self.doc_dist, doc_indexes, 0)
topic_sizes_remove = pd.Series(doc_top_remove).value_counts()
for top in topic_sizes_remove.index.tolist():
self.topic_sizes[top] -= topic_sizes_remove[top]
self.topic_sizes.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
def _get_document_ids(self, doc_index):
return self.document_ids[doc_index]
def _get_document_indexes(self, doc_ids):
if self.document_ids is None:
return doc_ids
else:
return [self.doc_id2index[doc_id] for doc_id in doc_ids]
def _words2word_vectors(self, keywords):
return self.word_vectors[[self.word_indexes[word] for word in keywords]]
def _get_combined_vec(self, vecs, vecs_neg):
combined_vector = np.zeros(self.document_vectors.shape[1], dtype=np.float64)
for vec in vecs:
combined_vector += vec
for vec in vecs_neg:
combined_vector -= vec
combined_vector /= (len(vecs) + len(vecs_neg))
combined_vector = self._l2_normalize(combined_vector)
return combined_vector
@staticmethod
def _search_vectors_by_vector(vectors, vector, num_res):
ranks = np.inner(vectors, vector)
indexes = np.flip(np.argsort(ranks)[-num_res:])
scores = np.array([ranks[res] for res in indexes])
return indexes, scores
@staticmethod
def _check_hnswlib_status():
if not _HAVE_HNSWLIB:
raise ImportError(f"Indexing is not available.\n\n"
"Try: pip install top2vec[indexing]\n\n"
"Alternatively try: pip install hnswlib")
def _check_document_index_status(self):
if self.document_index is None:
raise ImportError("There is no document index.\n\n"
"Call index_document_vectors method before setting use_index=True.")
def _check_word_index_status(self):
if self.word_index is None:
raise ImportError("There is no word index.\n\n"
"Call index_word_vectors method before setting use_index=True.")
def _check_import_status(self):
if self.embedding_model in use_models:
if not _HAVE_TENSORFLOW:
raise ImportError(f"{self.embedding_model} is not available.\n\n"
"Try: pip install top2vec[sentence_encoders]\n\n"
"Alternatively try: pip install tensorflow tensorflow_hub tensorflow_text")
elif self.embedding_model in sbert_models:
if not _HAVE_TORCH:
raise ImportError(f"{self.embedding_model} is not available.\n\n"
"Try: pip install top2vec[sentence_transformers]\n\n"
"Alternatively try: pip install torch sentence_transformers")
def _check_model_status(self):
if self.embed is None:
if self.verbose is False:
logger.setLevel(logging.DEBUG)
if self.embedding_model in use_models:
if self.embedding_model_path is None:
logger.info(f'Downloading {self.embedding_model} model')
module = use_model_urls[self.embedding_model]
else:
logger.info(f'Loading {self.embedding_model} model at {self.embedding_model_path}')
module = self.embedding_model_path
self.embed = hub.load(module)
elif self.embedding_model in sbert_models:
if self.embedding_model_path is None:
logger.info(f'Downloading {self.embedding_model} model')
module = self.embedding_model
else:
logger.info(f'Loading {self.embedding_model} model at {self.embedding_model_path}')
module = self.embedding_model_path
model = SentenceTransformer(module)
self.embed = model.encode
elif callable(self.embedding_model):
self.embed = self.embedding_model
self.embedding_model = "custom"
elif self.embedding_model == "custom":
raise ValueError("Call set_embedding_model method and pass callable"
" embedding_model used during training.")
if self.verbose is False:
logger.setLevel(logging.WARNING)
@staticmethod
def _less_than_zero(num, var_name):
if num < 0:
raise ValueError(f"{var_name} cannot be less than 0.")
def _validate_hierarchical_reduction(self):
if self.hierarchy is None:
raise ValueError("Hierarchical topic reduction has not been performed.")
def _validate_hierarchical_reduction_num_topics(self, num_topics):
current_num_topics = len(self.topic_vectors)
if num_topics >= current_num_topics:
raise ValueError(f"Number of topics must be less than {current_num_topics}.")
def _validate_num_docs(self, num_docs):
self._less_than_zero(num_docs, "num_docs")
document_count = len(self.doc_top)
if num_docs > document_count:
raise ValueError(f"num_docs cannot exceed the number of documents: {document_count}.")
def _validate_num_topics(self, num_topics, reduced):
self._less_than_zero(num_topics, "num_topics")
if reduced:
topic_count = len(self.topic_vectors_reduced)
if num_topics > topic_count:
raise ValueError(f"num_topics cannot exceed the number of reduced topics: {topic_count}.")
else:
topic_count = len(self.topic_vectors)
if num_topics > topic_count:
raise ValueError(f"num_topics cannot exceed the number of topics: {topic_count}.")
def _validate_topic_num(self, topic_num, reduced):
self._less_than_zero(topic_num, "topic_num")
if reduced:
topic_count = len(self.topic_vectors_reduced) - 1
if topic_num > topic_count:
raise ValueError(f"Invalid topic number: valid reduced topics numbers are 0 to {topic_count}.")
else:
topic_count = len(self.topic_vectors) - 1
if topic_num > topic_count:
raise ValueError(f"Invalid topic number: valid original topics numbers are 0 to {topic_count}.")
def _validate_topic_search(self, topic_num, num_docs, reduced):
self._less_than_zero(num_docs, "num_docs")
if reduced:
if num_docs > self.topic_sizes_reduced[topic_num]:
raise ValueError(f"Invalid number of documents: reduced topic {topic_num}"
f" only has {self.topic_sizes_reduced[topic_num]} documents.")
else:
if num_docs > self.topic_sizes[topic_num]:
raise ValueError(f"Invalid number of documents: original topic {topic_num}"
f" only has {self.topic_sizes[topic_num]} documents.")
def _validate_doc_ids(self, doc_ids, doc_ids_neg):
if not (isinstance(doc_ids, list) or isinstance(doc_ids, np.ndarray)):
raise ValueError("doc_ids must be a list of string or int.")
if not (isinstance(doc_ids_neg, list) or isinstance(doc_ids_neg, np.ndarray)):
raise ValueError("doc_ids_neg must be a list of string or int.")
if isinstance(doc_ids, np.ndarray):
doc_ids = list(doc_ids)
if isinstance(doc_ids_neg, np.ndarray):
doc_ids_neg = list(doc_ids_neg)
doc_ids_all = doc_ids + doc_ids_neg
if self.document_ids is not None:
for doc_id in doc_ids_all:
if doc_id not in self.doc_id2index:
raise ValueError(f"{doc_id} is not a valid document id.")
elif min(doc_ids) < 0:
raise ValueError(f"{min(doc_ids)} is not a valid document id.")
elif max(doc_ids) > len(self.doc_top) - 1:
raise ValueError(f"{max(doc_ids)} is not a valid document id.")
def _validate_keywords(self, keywords, keywords_neg):
if not (isinstance(keywords, list) or isinstance(keywords, np.ndarray)):
raise ValueError("keywords must be a list of strings.")
if not (isinstance(keywords_neg, list) or isinstance(keywords_neg, np.ndarray)):
raise ValueError("keywords_neg must be a list of strings.")
keywords_lower = [keyword.lower() for keyword in keywords]
keywords_neg_lower = [keyword.lower() for keyword in keywords_neg]
vocab = self.vocab
for word in keywords_lower + keywords_neg_lower:
if word not in vocab:
raise ValueError(f"'{word}' has not been learned by the model so it cannot be searched.")
return keywords_lower, keywords_neg_lower
def _validate_document_ids_add_doc(self, documents, document_ids):
if document_ids is None:
raise ValueError("Document ids need to be provided.")
if len(documents) != len(document_ids):
raise ValueError("Document ids need to match number of documents.")
if len(document_ids) != len(set(document_ids)):
raise ValueError("Document ids need to be unique.")
if len(set(document_ids).intersection(self.document_ids)) > 0:
raise ValueError("Some document ids already exist in model.")
if self.doc_id_type == np.str_:
if not all((isinstance(doc_id, str) or isinstance(doc_id, np.str_)) for doc_id in document_ids):
raise ValueError("Document ids need to be of type str.")
if self.doc_id_type == np.int_:
if not all((isinstance(doc_id, int) or isinstance(doc_id, np.int_)) for doc_id in document_ids):
raise ValueError("Document ids need to be of type int.")
@staticmethod
def _validate_documents(documents):
if not all((isinstance(doc, str) or isinstance(doc, np.str_)) for doc in documents):
raise ValueError("Documents need to be a list of strings.")
@staticmethod
def _validate_query(query):
        if not (isinstance(query, str) or isinstance(query, np.str_)):
raise ValueError("Query needs to be a string.")
def _validate_vector(self, vector):
if not isinstance(vector, np.ndarray):
raise ValueError("Vector needs to be a numpy array.")
vec_size = self.document_vectors.shape[1]
if not vector.shape[0] == vec_size:
raise ValueError(f"Vector needs to be of {vec_size} dimensions.")
def index_document_vectors(self, ef_construction=200, M=64):
"""
Creates an index of the document vectors using hnswlib. This will
lead to faster search times for models with a large number of
documents.
For more information on hnswlib see: https://github.com/nmslib/hnswlib
Parameters
----------
ef_construction: int (Optional default 200)
This parameter controls the trade-off between index construction
time and index accuracy. Larger values will lead to greater
accuracy but will take longer to construct.
M: int (Optional default 64)
This parameter controls the trade-off between both index size as
well as construction time and accuracy. Larger values will lead to
greater accuracy but will result in a larger index as well as
longer construction time.
For more information on the parameters see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
"""
self._check_hnswlib_status()
document_vectors = self.document_vectors
vec_dim = document_vectors.shape[1]
num_vecs = document_vectors.shape[0]
index_ids = list(range(0, len(self.document_ids)))
self.index_id2doc_id = dict(zip(index_ids, self.document_ids))
self.doc_id2index_id = dict(zip(self.document_ids, index_ids))
self.document_index = hnswlib.Index(space='ip', dim=vec_dim)
self.document_index.init_index(max_elements=num_vecs, ef_construction=ef_construction, M=M)
self.document_index.add_items(document_vectors, index_ids)
self.documents_indexed = True
def index_word_vectors(self, ef_construction=200, M=64):
"""
Creates an index of the word vectors using hnswlib. This will
lead to faster search times for models with a large number of
words.
For more information on hnswlib see: https://github.com/nmslib/hnswlib
Parameters
----------
ef_construction: int (Optional default 200)
This parameter controls the trade-off between index construction
time and index accuracy. Larger values will lead to greater
accuracy but will take longer to construct.
M: int (Optional default 64)
This parameter controls the trade-off between both index size as
well as construction time and accuracy. Larger values will lead to
greater accuracy but will result in a larger index as well as
longer construction time.
For more information on the parameters see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
"""
self._check_hnswlib_status()
word_vectors = self.word_vectors
vec_dim = word_vectors.shape[1]
num_vecs = word_vectors.shape[0]
index_ids = list(range(0, num_vecs))
self.word_index = hnswlib.Index(space='ip', dim=vec_dim)
self.word_index.init_index(max_elements=num_vecs, ef_construction=ef_construction, M=M)
self.word_index.add_items(word_vectors, index_ids)
self.words_indexed = True
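    # Illustrative sketch (not part of the class): `model` below stands for a
    # hypothetical trained Top2Vec instance. Building the hnswlib indexes is a
    # one-off cost that speeds up subsequent vector-based document and word
    # searches on large models:
    #
    #     model.index_document_vectors(ef_construction=200, M=64)
    #     model.index_word_vectors(ef_construction=200, M=64)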
def set_embedding_model(self, embedding_model):
"""
Set the embedding model. This is called after loading a saved Top2Vec
model which was trained with a passed callable embedding_model.
Parameters
----------
embedding_model: callable
This must be the same embedding model used during training.
"""
if not callable(embedding_model):
raise ValueError("embedding_model must be callable.")
self.embed = embedding_model
def update_embedding_model_path(self, embedding_model_path):
"""
Update the path of the embedding model to be loaded. The model will
no longer be downloaded but loaded from the path location.
Warning: the model at embedding_model_path must match the
embedding_model parameter type.
Parameters
----------
embedding_model_path: Str
Path to downloaded embedding model.
"""
self.embedding_model_path = embedding_model_path
def change_to_download_embedding_model(self):
"""
Use automatic download to load embedding model used for training.
        Top2Vec will no longer try to load the embedding model from a file
        if an embedding_model path was previously added.
"""
self.embedding_model_path = None
def get_documents_topics(self, doc_ids, reduced=False, num_topics=1):
"""
Get document topics.
The topic of each document will be returned.
The corresponding original topics are returned unless reduced=True,
in which case the reduced topics will be returned.
Parameters
----------
doc_ids: List of str, int
A unique value per document that is used for referring to
documents in search results. If ids were not given to the model,
the index of each document in the model is the id.
reduced: bool (Optional, default False)
Original topics are returned by default. If True the
reduced topics will be returned.
num_topics: int (Optional, default 1)
The number of topics to return per document.
Returns
-------
topic_nums: array of int, shape(len(doc_ids), num_topics)
The topic number(s) of the document corresponding to each doc_id.
topic_score: array of float, shape(len(doc_ids), num_topics)
Semantic similarity of document to topic(s). The cosine similarity
of the document and topic vector.
topics_words: array of shape(len(doc_ids), num_topics, 50)
For each topic the top 50 words are returned, in order
of semantic similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 4>
            ['environment', 'warming', 'climate' ... 'temperature'] <Topic 21>
...]
        word_scores: array of shape(len(doc_ids), num_topics, 50)
For each topic the cosine similarity scores of the
top 50 words to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 4>
            [0.7818, 0.7671, 0.7603 ... 0.6769] <Topic 21>
...]
"""
if reduced:
self._validate_hierarchical_reduction()
# make sure documents exist
self._validate_doc_ids(doc_ids, doc_ids_neg=[])
# get document indexes from ids
doc_indexes = self._get_document_indexes(doc_ids)
if num_topics == 1:
if reduced:
doc_topics = self.doc_top_reduced[doc_indexes]
doc_dist = self.doc_dist_reduced[doc_indexes]
topic_words = self.topic_words_reduced[doc_topics]
topic_word_scores = self.topic_word_scores_reduced[doc_topics]
else:
doc_topics = self.doc_top[doc_indexes]
doc_dist = self.doc_dist[doc_indexes]
topic_words = self.topic_words[doc_topics]
topic_word_scores = self.topic_word_scores[doc_topics]
else:
if reduced:
topic_vectors = self.topic_vectors_reduced
else:
topic_vectors = self.topic_vectors
doc_topics, doc_dist = self._calculate_documents_topic(topic_vectors,
self.document_vectors[doc_indexes],
num_topics=num_topics)
topic_words = np.array([self.topic_words[topics] for topics in doc_topics])
topic_word_scores = np.array([self.topic_word_scores[topics] for topics in doc_topics])
return doc_topics, doc_dist, topic_words, topic_word_scores
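    # Illustrative sketch (hypothetical trained `model`): fetch the two closest
    # topics for a couple of documents. The return order follows the docstring
    # above: topic numbers, similarity scores, topic words, word scores.
    #
    #     topic_nums, topic_scores, topic_words, word_scores = \
    #         model.get_documents_topics(doc_ids=[0, 5], num_topics=2)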
def add_documents(self,
documents,
doc_ids=None,
tokenizer=None,
use_embedding_model_tokenizer=False,
embedding_batch_size=32):
"""
Update the model with new documents.
The documents will be added to the current model without changing
existing document, word and topic vectors. Topic sizes will be updated.
If adding a large quantity of documents relative to the current model
size, or documents containing a largely new vocabulary, a new model
should be trained for best results.
Parameters
----------
documents: List of str
doc_ids: List of str, int (Optional)
Only required when doc_ids were given to the original model.
A unique value per document that will be used for referring to
documents in search results.
tokenizer: callable (Optional, default None)
Override the default tokenization method. If None then
gensim.utils.simple_preprocess will be used.
use_embedding_model_tokenizer: bool (Optional, default False)
If using an embedding model other than doc2vec, use the model's
tokenizer for document embedding.
embedding_batch_size: int (default=32)
Batch size for documents being embedded.
"""
# if tokenizer is not passed use default
if tokenizer is None:
tokenizer = default_tokenizer
# add documents
self._validate_documents(documents)
if self.documents is not None:
self.documents = np.array((list(self.documents) + list(documents)), dtype="object")
# add document ids
if self.document_ids_provided is True:
self._validate_document_ids_add_doc(documents, doc_ids)
doc_ids_len = len(self.document_ids)
self.document_ids = np.array(list(self.document_ids) + list(doc_ids))
self.doc_id2index.update(dict(zip(doc_ids, list(range(doc_ids_len, doc_ids_len + len(doc_ids))))))
elif doc_ids is None:
num_docs = len(documents)
start_id = max(self.document_ids) + 1
doc_ids = list(range(start_id, start_id + num_docs))
doc_ids_len = len(self.document_ids)
self.document_ids = np.array(list(self.document_ids) + list(doc_ids))
self.doc_id2index.update(dict(zip(doc_ids, list(range(doc_ids_len, doc_ids_len + len(doc_ids))))))
else:
raise ValueError("doc_ids cannot be used because they were not provided to model during training.")
if self.embedding_model == "doc2vec":
docs_processed = [tokenizer(doc) for doc in documents]
document_vectors = np.vstack([self.model.infer_vector(doc_words=doc,
alpha=0.025,
min_alpha=0.01,
epochs=100) for doc in docs_processed])
document_vectors = self._l2_normalize(document_vectors)
self.document_vectors = np.vstack([self.document_vectors, document_vectors])
else:
if use_embedding_model_tokenizer:
docs_training = documents
else:
docs_processed = [tokenizer(doc) for doc in documents]
docs_training = [' '.join(doc) for doc in docs_processed]
document_vectors = self._embed_documents(docs_training, embedding_batch_size)
self.document_vectors = np.vstack([self.document_vectors, document_vectors])
# update index
if self.documents_indexed:
# update capacity of index
current_max = self.document_index.get_max_elements()
updated_max = current_max + len(documents)
self.document_index.resize_index(updated_max)
# update index_id and doc_ids
start_index_id = max(self.index_id2doc_id.keys()) + 1
new_index_ids = list(range(start_index_id, start_index_id + len(doc_ids)))
self.index_id2doc_id.update(dict(zip(new_index_ids, doc_ids)))
self.doc_id2index_id.update(dict(zip(doc_ids, new_index_ids)))
self.document_index.add_items(document_vectors, new_index_ids)
# update topics
self._assign_documents_to_topic(document_vectors, hierarchy=False)
if self.hierarchy is not None:
self._assign_documents_to_topic(document_vectors, hierarchy=True)
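    # Illustrative sketch (hypothetical `model` trained with document ids):
    #
    #     model.add_documents(["text of a new article", "another new article"],
    #                         doc_ids=["doc-101", "doc-102"])
    #
    # Existing document, word and topic vectors are left unchanged; only topic
    # sizes (and the hnswlib index, if one was built) are updated.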
def delete_documents(self, doc_ids):
"""
Delete documents from current model.
Warning: If document ids were not used in original model, deleting
documents will change the indexes and therefore doc_ids.
The documents will be deleted from the current model without changing
existing document, word and topic vectors. Topic sizes will be updated.
If deleting a large quantity of documents relative to the current model
size a new model should be trained for best results.
Parameters
----------
doc_ids: List of str, int
A unique value per document that is used for referring to documents
in search results.
"""
# make sure documents exist
self._validate_doc_ids(doc_ids, doc_ids_neg=[])
# update index
if self.documents_indexed:
# delete doc_ids from index
            index_ids = [self.doc_id2index_id[doc_id] for doc_id in doc_ids]
for index_id in index_ids:
self.document_index.mark_deleted(index_id)
# update index_id and doc_ids
for doc_id in doc_ids:
self.doc_id2index_id.pop(doc_id)
for index_id in index_ids:
self.index_id2doc_id.pop(index_id)
# get document indexes from ids
doc_indexes = self._get_document_indexes(doc_ids)
# delete documents
if self.documents is not None:
self.documents = np.delete(self.documents, doc_indexes, 0)
# delete document ids
if self.document_ids is not None:
for doc_id in doc_ids:
self.doc_id2index.pop(doc_id)
keys = list(self.doc_id2index.keys())
self.document_ids = np.array(keys)
values = list(range(0, len(self.doc_id2index.values())))
self.doc_id2index = dict(zip(keys, values))
# delete document vectors
self.document_vectors = np.delete(self.document_vectors, doc_indexes, 0)
# update topics
self._unassign_documents_from_topic(doc_indexes, hierarchy=False)
if self.hierarchy is not None:
self._unassign_documents_from_topic(doc_indexes, hierarchy=True)
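    # Illustrative sketch (hypothetical `model`): remove the documents added in
    # the add_documents example above.
    #
    #     model.delete_documents(doc_ids=["doc-101", "doc-102"])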
def get_num_topics(self, reduced=False):
"""
Get number of topics.
This is the number of topics Top2Vec has found in the data by default.
If reduced is True, the number of reduced topics is returned.
Parameters
----------
reduced: bool (Optional, default False)
The number of original topics will be returned by default. If True
will return the number of reduced topics, if hierarchical topic
reduction has been performed.
Returns
-------
num_topics: int
"""
if reduced:
self._validate_hierarchical_reduction()
return len(self.topic_vectors_reduced)
else:
return len(self.topic_vectors)
def get_topic_sizes(self, reduced=False):
"""
Get topic sizes.
The number of documents most similar to each topic. Topics are
        in decreasing order of size.
The sizes of the original topics is returned unless reduced=True,
in which case the sizes of the reduced topics will be returned.
Parameters
----------
reduced: bool (Optional, default False)
Original topic sizes are returned by default. If True the
reduced topic sizes will be returned.
Returns
-------
topic_sizes: array of int, shape(num_topics)
The number of documents most similar to the topic.
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
if reduced:
self._validate_hierarchical_reduction()
return np.array(self.topic_sizes_reduced.values), np.array(self.topic_sizes_reduced.index)
else:
return np.array(self.topic_sizes.values), np.array(self.topic_sizes.index)
def get_topics(self, num_topics=None, reduced=False):
"""
Get topics, ordered by decreasing size. All topics are returned
if num_topics is not specified.
The original topics found are returned unless reduced=True,
in which case reduced topics will be returned.
Each topic will consist of the top 50 semantically similar words
to the topic. These are the 50 words closest to topic vector
along with cosine similarity of each word from vector. The
higher the score the more relevant the word is to the topic.
Parameters
----------
num_topics: int, (Optional)
Number of topics to return.
reduced: bool (Optional, default False)
Original topics are returned by default. If True the
reduced topics will be returned.
Returns
-------
topics_words: array of shape(num_topics, 50)
For each topic the top 50 words are returned, in order
of semantic similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 0>
            ['environment', 'warming', 'climate' ... 'temperature'] <Topic 1>
...]
word_scores: array of shape(num_topics, 50)
For each topic the cosine similarity scores of the
top 50 words to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 0>
            [0.7818, 0.7671, 0.7603 ... 0.6769] <Topic 1>
...]
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
if reduced:
self._validate_hierarchical_reduction()
if num_topics is None:
num_topics = len(self.topic_vectors_reduced)
else:
self._validate_num_topics(num_topics, reduced)
return self.topic_words_reduced[0:num_topics], self.topic_word_scores_reduced[0:num_topics], np.array(
range(0, num_topics))
else:
if num_topics is None:
num_topics = len(self.topic_vectors)
else:
self._validate_num_topics(num_topics, reduced)
return self.topic_words[0:num_topics], self.topic_word_scores[0:num_topics], np.array(range(0, num_topics))
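    # Illustrative sketch (hypothetical trained `model`): list the five largest
    # topics together with their sizes.
    #
    #     topic_sizes, topic_nums = model.get_topic_sizes()
    #     topic_words, word_scores, topic_nums = model.get_topics(num_topics=5)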
def get_topic_hierarchy(self):
"""
Get the hierarchy of reduced topics. The mapping of each original topic
to the reduced topics is returned.
Hierarchical topic reduction must be performed before calling this
method.
Returns
-------
hierarchy: list of ints
Each index of the hierarchy corresponds to the topic number of a
reduced topic. For each reduced topic the topic numbers of the
original topics that were merged to create it are listed.
Example:
[[3] <Reduced Topic 0> contains original Topic 3
[2,4] <Reduced Topic 1> contains original Topics 2 and 4
            [0,1] <Reduced Topic 2> contains original Topics 0 and 1
...]
"""
self._validate_hierarchical_reduction()
return self.hierarchy
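    # Illustrative sketch (hypothetical `model` with many original topics):
    # reduce to 20 topics, then inspect the mapping and the reduced topic words.
    #
    #     hierarchy = model.hierarchical_topic_reduction(num_topics=20)
    #     model.get_topic_hierarchy()          # same mapping as `hierarchy`
    #     model.get_topics(reduced=True)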
def hierarchical_topic_reduction(self, num_topics):
"""
Reduce the number of topics discovered by Top2Vec.
The most representative topics of the corpus will be found, by
iteratively merging each smallest topic to the most similar topic until
num_topics is reached.
Parameters
----------
num_topics: int
The number of topics to reduce to.
Returns
-------
hierarchy: list of ints
Each index of hierarchy corresponds to the reduced topics, for each
reduced topic the indexes of the original topics that were merged
to create it are listed.
Example:
[[3] <Reduced Topic 0> contains original Topic 3
[2,4] <Reduced Topic 1> contains original Topics 2 and 4
            [0,1] <Reduced Topic 2> contains original Topics 0 and 1
...]
"""
self._validate_hierarchical_reduction_num_topics(num_topics)
num_topics_current = self.topic_vectors.shape[0]
top_vecs = self.topic_vectors
top_sizes = [self.topic_sizes[i] for i in range(0, len(self.topic_sizes))]
hierarchy = [[i] for i in range(self.topic_vectors.shape[0])]
count = 0
interval = max(int(self.document_vectors.shape[0] / 50000), 1)
while num_topics_current > num_topics:
# find smallest and most similar topics
smallest = np.argmin(top_sizes)
res = np.inner(top_vecs[smallest], top_vecs)
sims = np.flip(np.argsort(res))
most_sim = sims[1]
if most_sim == smallest:
most_sim = sims[0]
# calculate combined topic vector
top_vec_smallest = top_vecs[smallest]
smallest_size = top_sizes[smallest]
top_vec_most_sim = top_vecs[most_sim]
most_sim_size = top_sizes[most_sim]
combined_vec = self._l2_normalize(((top_vec_smallest * smallest_size) +
(top_vec_most_sim * most_sim_size)) / (smallest_size + most_sim_size))
# update topic vectors
ix_keep = list(range(len(top_vecs)))
ix_keep.remove(smallest)
ix_keep.remove(most_sim)
top_vecs = top_vecs[ix_keep]
top_vecs = np.vstack([top_vecs, combined_vec])
num_topics_current = top_vecs.shape[0]
# update topics sizes
if count % interval == 0:
doc_top = self._calculate_documents_topic(topic_vectors=top_vecs,
document_vectors=self.document_vectors,
dist=False)
topic_sizes = | pd.Series(doc_top) | pandas.Series |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` functions and operators implementations in SDC
| Also, it contains Numba internal operators which are required for Series type handling
"""
import numba
import numpy
import operator
import pandas
import math
import sys
from numba.errors import TypingError
from numba.extending import overload, overload_method, overload_attribute
from numba.typing import signature
from numba.extending import intrinsic
from numba import (types, numpy_support, cgutils)
from numba.typed import Dict
from numba import prange
import sdc
import sdc.datatypes.common_functions as common_functions
from sdc.datatypes.common_functions import (TypeChecker, check_index_is_numeric, find_common_dtype_from_numpy_dtypes,
sdc_join_series_indexes)
from sdc.datatypes.hpat_pandas_series_rolling_types import _hpat_pandas_series_rolling_init
from sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType
from sdc.datatypes.hpat_pandas_getitem_types import SeriesGetitemAccessorType
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (StringArrayType, string_array_type, str_arr_is_na, str_arr_set_na,
num_total_chars, pre_alloc_string_array, cp_str_list_to_array)
from sdc.utils import to_array, sdc_overload, sdc_overload_method, sdc_overload_attribute
from sdc.datatypes import hpat_pandas_series_autogenerated
@sdc_overload(operator.getitem)
def hpat_pandas_series_accessor_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesGetitemAccessorType):
return None
accessor = self.accessor.literal_value
if accessor == 'iloc':
if isinstance(idx, (types.List, types.Array, types.SliceType)):
def hpat_pandas_series_iloc_list_slice_impl(self, idx):
result_data = self._series._data[idx]
result_index = self._series.index[idx]
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_iloc_list_slice_impl
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iloc_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iloc_impl
def hpat_pandas_series_iloc_callable_impl(self, idx):
index = numpy.asarray(list(map(idx, self._series._data)))
return pandas.Series(self._series._data[index], self._series.index[index], self._series._name)
return hpat_pandas_series_iloc_callable_impl
        raise TypingError('{} The index must be an Integer, Slice or List of Integer '
                          'or a callable. Given: {}'.format(_func_name, idx))
if accessor == 'iat':
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iat_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iat_impl
        raise TypingError('{} The index must be an Integer. Given: {}'.format(_func_name, idx))
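    # Illustrative sketch (assumption: the usual SDC workflow where importing sdc
    # enables pandas.Series support inside numba.njit functions). The iloc/iat
    # branches above are what such user code ends up dispatching to:
    #
    #     @numba.njit
    #     def third_element(s):
    #         return s.iat[2]
    #
    #     third_element(pandas.Series([5, 6, 7]))  # -> 7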
if accessor == 'loc':
        # Note: Loc returns a Series
        # Note: Index 0 in slice not supported
        # Note: Loc slice and callable with String not implemented
index_is_none = (self.series.index is None or
isinstance(self.series.index, numba.types.misc.NoneType))
if isinstance(idx, types.SliceType) and index_is_none:
def hpat_pandas_series_loc_slice_noidx_impl(self, idx):
max_slice = sys.maxsize
start = idx.start
stop = idx.stop
if idx.stop == max_slice:
stop = max_slice - 1
result_data = self._series._data[start:stop+1]
result_index = numpy.arange(start, stop + 1)
return | pandas.Series(result_data, result_index, self._series._name) | pandas.Series |
import pytest
import pandas as pd
import numpy as np
from ramprate.build_features import _find_uptime
def test__find_uptime_start_and_end_nonzero():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [2, 2, 0, 0, 0, 2]
# downtime=True
# first zero after non-zero
shutdown = pd.to_datetime(["2020-01-01 02:00"], utc=True)
# last zero before non-zero
startup = pd.to_datetime(["2020-01-01 04:00"], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
| pd.testing.assert_frame_equal(actual, expected) | pandas.testing.assert_frame_equal |
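# Illustrative additional case (a sketch, not from the original suite): a single
# zero-run strictly inside the series, using the same shutdown/startup
# conventions exercised above.
def test__find_uptime_single_interior_window():
    dt_idx = pd.date_range(start="2020-01-01 00:00", periods=5, freq="h", tz="UTC")
    data = [3, 0, 0, 3, 3]
    # first zero after non-zero
    shutdown = pd.to_datetime(["2020-01-01 01:00"], utc=True)
    # last zero before non-zero
    startup = pd.to_datetime(["2020-01-01 02:00"], utc=True)
    expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
    actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
    pd.testing.assert_frame_equal(actual, expected)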
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
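# Illustrative note (sketch, not an executable test): the helpers below are
# typically combined as, e.g.,
#
#     _check_pandas_roundtrip(_alltypes_example(size=10))
#
# which converts the frame to a pyarrow Table and back and asserts the result
# matches the original (modulo coercions such as ms -> ns timestamps).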
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
# without the pandas generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
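# Illustrative counterpart (a sketch, not part of the original suite): without
# integer_object_nulls, integer nulls are upcast to float64 and come back as NaN.
def test_integer_nulls_default_upcast_to_float():
    arr = pa.array([1, None, 3])
    result = arr.to_pandas()
    assert result.dtype == np.float64
    assert np.isnan(result[1])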
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_array_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
assert arr.equals(pa.array(expected))
result = arr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(arr.to_pandas(), expected)
result = arr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_chunked_array_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
carr = pa.chunked_array([data])
result = carr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(carr.to_pandas(), expected)
result = carr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_column_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
column = pa.column('date', arr)
result = column.to_pandas()
npt.assert_array_equal(column.to_pandas(), expected)
result = column.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_table_convert_date_as_object(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
df_datetime = table.to_pandas()
df_object = table.to_pandas(date_as_object=True)
tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
check_dtype=True)
tm.assert_frame_equal(df, df_object, check_dtype=True)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
        # TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
def test_pandas_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match="was not a utf8 string"):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(
pa.lib.ArrowInvalid,
match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
def test_strided_objects(self, tmpdir):
# see ARROW-3053
data = {
'a': {0: 'a'},
'b': {0: decimal.Decimal('0.0')}
}
# This yields strided objects
df = pd.DataFrame.from_dict(data)
_check_pandas_roundtrip(df)
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_nested_list_first_empty(self):
# ARROW-2711
data = pd.Series([[], [u"a"]])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.string())
def test_nested_smaller_ints(self):
# ARROW-1345, ARROW-2008, there were some type inference bugs happening
# before
data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
result = pa.array(data)
result2 = pa.array(data.values)
expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
assert result.equals(expected)
assert result2.equals(expected)
data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
result3 = pa.array(data3)
expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
assert result3.equals(expected3)
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_to_pandas(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
def test_from_numpy(self):
dt = np.dtype([('x', np.int32),
(('y_title', 'y'), np.bool_)])
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(42, True), (43, False)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True},
{'x': 43, 'y': False}]
# With mask
arr = pa.array(data, mask=np.bool_([False, True]), type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True}, None]
# Trivial struct type
dt = np.dtype([])
ty = pa.struct([])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(), ()], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{}, {}]
def test_from_numpy_nested(self):
dt = np.dtype([('x', np.dtype([('xx', np.int8),
('yy', np.bool_)])),
('y', np.int16)])
ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),
pa.field('yy', pa.bool_())])),
pa.field('y', pa.int16())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2},
{'x': {'xx': 3, 'yy': False}, 'y': 4}]
@pytest.mark.large_memory
def test_from_numpy_large(self):
# Exercise rechunking + nulls
target_size = 3 * 1024**3  # 3GB
dt = np.dtype([('x', np.float64), ('y', 'object')])
bs = 65536 - dt.itemsize
block = b'.' * bs
n = target_size // (bs + dt.itemsize)
data = np.zeros(n, dtype=dt)
data['x'] = np.random.random_sample(n)
data['y'] = block
# Add implicit nulls
data['x'][data['x'] < 0.2] = np.nan
ty = pa.struct([pa.field('x', pa.float64()),
pa.field('y', pa.binary(bs))])
arr = pa.array(data, type=ty, from_pandas=True)
assert arr.num_chunks == 2
def iter_chunked_array(arr):
for chunk in arr.iterchunks():
for item in chunk:
yield item
def check(arr, data, mask=None):
assert len(arr) == len(data)
xs = data['x']
ys = data['y']
for i, obj in enumerate(iter_chunked_array(arr)):
try:
d = obj.as_py()
if mask is not None and mask[i]:
assert d is None
else:
x = xs[i]
if np.isnan(x):
assert d['x'] is None
else:
assert d['x'] == x
assert d['y'] == ys[i]
except Exception:
print("Failed at index", i)
raise
check(arr, data)
del arr
# Now with explicit mask
mask = np.random.random_sample(n) < 0.2
arr = pa.array(data, type=ty, mask=mask, from_pandas=True)
assert arr.num_chunks == 2
check(arr, data, mask)
del arr
def test_from_numpy_bad_input(self):
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
dt = np.dtype([('x', np.int32),
('z', np.bool_)])
data = np.array([], dtype=dt)
with pytest.raises(TypeError,
match="Missing field 'y'"):
pa.array(data, type=ty)
data = np.int32([])
with pytest.raises(TypeError,
match="Expected struct array"):
pa.array(data, type=ty)
class TestZeroCopyConversion(object):
"""
Tests that zero-copy conversion works with some types.
"""
def test_zero_copy_success(self):
result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
npt.assert_array_equal(result, [0, 1, 2])
def test_zero_copy_dictionaries(self):
arr = pa.DictionaryArray.from_arrays(
np.array([0, 0]),
np.array([5]))
result = arr.to_pandas(zero_copy_only=True)
values = pd.Categorical([5, 5])
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
def check_zero_copy_failure(self, arr):
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas(zero_copy_only=True)
def test_zero_copy_failure_on_object_types(self):
self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))
def test_zero_copy_failure_with_int_when_nulls(self):
self.check_zero_copy_failure(pa.array([0, 1, None]))
def test_zero_copy_failure_with_float_when_nulls(self):
self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))
def test_zero_copy_failure_on_bool_types(self):
self.check_zero_copy_failure(pa.array([True, False]))
def test_zero_copy_failure_on_list_types(self):
arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))
self.check_zero_copy_failure(arr)
def test_zero_copy_failure_on_timestamp_types(self):
arr = np.array(['2007-07-13'], dtype='datetime64[ns]')
self.check_zero_copy_failure(pa.array(arr))
# This function must be at the top-level for Python 2.7's multiprocessing
def _non_threaded_conversion():
df = _alltypes_example()
_check_pandas_roundtrip(df, use_threads=False)
_check_pandas_roundtrip(df, use_threads=False, as_batch=True)
def _threaded_conversion():
df = _alltypes_example()
_check_pandas_roundtrip(df, use_threads=True)
_check_pandas_roundtrip(df, use_threads=True, as_batch=True)
class TestConvertMisc(object):
"""
Miscellaneous conversion tests.
"""
type_pairs = [
(np.int8, pa.int8()),
(np.int16, pa.int16()),
(np.int32, pa.int32()),
(np.int64, pa.int64()),
(np.uint8, pa.uint8()),
(np.uint16, pa.uint16()),
(np.uint32, pa.uint32()),
(np.uint64, pa.uint64()),
(np.float16, pa.float16()),
(np.float32, pa.float32()),
(np.float64, pa.float64()),
# XXX unsupported
# (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),
(np.object, pa.string()),
(np.object, pa.binary()),
(np.object, pa.binary(10)),
(np.object, pa.list_(pa.int64())),
]
def test_all_none_objects(self):
df = pd.DataFrame({'a': [None, None, None]})
_check_pandas_roundtrip(df)
def test_all_none_category(self):
df = pd.DataFrame({'a': [None, None, None]})
df['a'] = df['a'].astype('category')
_check_pandas_roundtrip(df)
def test_empty_arrays(self):
for dtype, pa_type in self.type_pairs:
arr = np.array([], dtype=dtype)
_check_array_roundtrip(arr, type=pa_type)
def test_non_threaded_conversion(self):
_non_threaded_conversion()
def test_threaded_conversion_multiprocess(self):
# Parallel conversion should work from child processes too (ARROW-2963)
pool = mp.Pool(2)
try:
pool.apply(_threaded_conversion)
finally:
pool.close()
pool.join()
def test_category(self):
repeats = 5
v1 = ['foo', None, 'bar', 'qux', np.nan]
v2 = [4, 5, 6, 7, 8]
v3 = [b'foo', None, b'bar', b'qux', np.nan]
arrays = {
'cat_strings': pd.Categorical(v1 * repeats),
'cat_strings_with_na': pd.Categorical(v1 * repeats,
categories=['foo', 'bar']),
'cat_ints': pd.Categorical(v2 * repeats),
'cat_binary': pd.Categorical(v3 * repeats),
'cat_strings_ordered': pd.Categorical(
v1 * repeats, categories=['bar', 'qux', 'foo'],
ordered=True),
'ints': v2 * repeats,
'ints2': v2 * repeats,
'strings': v1 * repeats,
'strings2': v1 * repeats,
'strings3': v3 * repeats}
df = pd.DataFrame(arrays)
_check_pandas_roundtrip(df)
for k in arrays:
_check_array_roundtrip(arrays[k])
def test_category_implicit_from_pandas(self):
# ARROW-3374
def _check(v):
arr = pa.array(v)
result = arr.to_pandas()
tm.assert_series_equal(pd.Series(result), pd.Series(v))
""" Module for data preprocessing.
"""
import datetime
import warnings
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Union
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
__all__ = [
'ColumnSelector',
'ColumnDropper',
'ColumnRename',
'NaDropper',
'Clip',
'DatetimeTransformer',
'NumericTransformer',
'TimeframeExtractor',
'DateExtractor',
'ValueMapper',
'Sorter',
'Fill',
'TimeOffsetTransformer',
'ConditionedDropper',
'ZeroVarianceDropper',
'SignalSorter',
'ColumnSorter',
'DifferentialCreator'
]
class ColumnSelector(BaseEstimator, TransformerMixin):
"""Transformer to select a list of columns by their name.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': [0]})
>>> ColumnSelector(keys=['a']).transform(data)
pd.DataFrame({'a': [0]})
"""
def __init__(self, keys: List[str]):
"""Creates ColumnSelector.
Transformer to select a list of columns for further processing.
Args:
keys (List[str]): List of columns to extract.
"""
self._keys = keys
def fit(self, X, y=None):
return self
def transform(self, X):
"""Extracts the columns from `X`.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns a DataFrame only containing the selected
features.
"""
return X.loc[:, self._keys]
class ColumnDropper(BaseEstimator, TransformerMixin):
"""Transformer to drop a list of columns by their name.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': [0]})
>>> ColumnDropper(columns=['b']).transform(data)
pd.DataFrame({'a': [0]})
"""
def __init__(self,
*,
columns: Union[List[str], Set[str]],
verbose: bool = False):
"""Creates ColumnDropper.
Transformer to drop a list of columns from the data frame.
Args:
columns (list): List of column names to drop.
verbose (bool, optional): Whether to print the remaining and removed columns.
"""
self.columns = set(columns)
self.verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops a list of columns of `X`.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the dataframe without the dropped features.
"""
cols = set(X.columns.to_list())
if len(m := self.columns - cols) > 0:
warnings.warn(f'Columns {m} not found in dataframe.')
if self.verbose:
print(f'New columns: {cols - self.columns}. '
f'Removed: {self.columns}.')
return X.drop(self.columns, axis=1, errors='ignore')
class ColumnRename(BaseEstimator, TransformerMixin):
"""Transformer to rename column with a function.
Example:
>>> data = pd.DataFrame({'a.b.c': [0], 'd.e.f': [0]})
>>> ColumnRename(lambda x: x.split('.')[-1]).transform(data)
pd.DataFrame({'c': [0], 'f': [0]})
"""
def __init__(self, mapper: Callable[[str], str]):
"""Create ColumnRename.
Transformer to rename columns by a mapper function.
Args:
mapper (lambda): Mapper rename function.
Example:
Given column with name: a.b.c
lambda x: x.split('.')[-1]
Returns c
"""
self.mapper = mapper
def fit(self, X, y=None):
return self
def transform(self, X):
"""Renames a columns in `X` with a mapper function.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the dataframe with the renamed columns.
"""
# split the column name
# use the last item as new name
return X.rename(columns=self.mapper)
class NaDropper(BaseEstimator, TransformerMixin):
"""Transformer that drops rows with na values.
Example:
>>> data = pd.DataFrame({'a': [0, 1], 'b': [0, np.nan]})
>>> NaDropper().transform(data)
pd.DataFrame({'a': [0], 'b': [0]})
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
return X.dropna()
class Clip(BaseEstimator, TransformerMixin):
"""Transformer that clips values by a lower and upper bound.
Example:
>>> data = pd.DataFrame({'a': [-0.1, 1.2], 'b': [0.5, 0.6]})
>>> Clip().transform(data)
pd.DataFrame({'a': [0, 1], 'b': [0.5, 0.6]})
"""
def __init__(self, lower: float = 0.0, upper: float = 1.0):
"""Creates Clip.
Transformer that clips a numeric column to the threshold if the
threshold is exceeded. Works with an upper and lower threshold. Wrapper
for pd.DataFrame.clip.
Args:
lower (float, optional): lower limit. Defaults to 0.
upper (float, optional): upper limit. Defaults to 1.
"""
self.upper = upper
self.lower = lower
def fit(self, X, y=None):
return self
def transform(self, X):
return X.clip(lower=self.lower, upper=self.upper, axis=0)
class ColumnTSMapper(BaseEstimator, TransformerMixin):
def __init__(self,
cols: List[str],
timedelta: pd.Timedelta = pd.Timedelta(250, 'ms'),
classes: List[str] = None,
verbose: bool = False):
"""Creates ColumnTSMapper.
Expects the timestamp column to be of type pd.Timestamp.
Args:
cols (List[str]): names of [0] timestamp column, [1] sensor names,
[2] sensor values.
timedelta (pd.Timedelta): Timedelta to resample with.
classes (List[str]): List of sensor names.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._cols = cols
self._timedelta = timedelta
self._verbose = verbose
if classes is not None:
self.classes_ = classes
def fit(self, X, y=None):
"""Gets the unique values in the sensor name column that
are needed to expand the dataframe.
Args:
X (pd.DataFrame): Dataframe.
y (array-like, optional): Labels. Defaults to None.
Returns:
ColumnTSMapper: Returns this.
"""
classes = X[self._cols[1]].unique()
self.classes_ = np.hstack(['Timestamp', classes])
return self
def transform(self, X):
"""Performs the mapping to equidistant timestamps.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if column is not found in `X`.
Returns:
pd.DataFrame: Returns the remapped dataframe.
"""
# check is fit had been called
check_is_fitted(self)
# check if all columns exist
if not all([item in X.columns for item in self._cols]):
raise ValueError(
f'Columns {self._cols} not found in DataFrame '
f'{X.columns.to_list()}.')
# split sensors into individual columns
# create new dataframe with all _categories
# use timestamp index, to use resample later on
# initialized with na
sensors = pd.DataFrame(
None, columns=self.classes_, index=X[self._cols[0]])
# group by sensor
groups = X.groupby([self._cols[1]])
# write sensor values to sensors which is indexed by the timestamp
for g in groups:
sensors.loc[g[1][self._cols[0]], g[0]
] = g[1][self._cols[2]].to_numpy()
sensors = sensors.apply(pd.to_numeric, errors='ignore')
# fill na, important before resampling
# otherwise mean affects more samples than necessary
# first: forward fill to next valid observation
# second: backward fill first missing rows
sensors = sensors.fillna(method='ffill').fillna(method='bfill')
# resamples to equidistant timeframe
# take avg if multiple samples in the same timeframe
sensors = sensors.resample(self._timedelta).mean()
sensors = sensors.fillna(method='ffill').fillna(method='bfill')
# FIXME: to avoid nans in model, but needs better fix
sensors = sensors.fillna(value=0.0)
# move index to column and use rangeindex
sensors['Timestamp'] = sensors.index
sensors.index = pd.RangeIndex(stop=sensors.shape[0])
if self._verbose:
start, end = sensors.iloc[0, 0], sensors.iloc[-1, 0]
print('ColumnTSMapper: ')
print(f'{sensors.shape[0]} rows. '
f'Mapped to {self._timedelta.total_seconds()}s interval '
f'from {start} to {end}.')
return sensors
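# Usage sketch for ColumnTSMapper (the column names 'Timestamp', 'Sensor' and
# 'Value' below are illustrative only, they are not mandated by this module):
#
# >>> raw = pd.DataFrame({
# ...     'Timestamp': pd.to_datetime(['2021-07-02 10:00:00.100',
# ...                                  '2021-07-02 10:00:00.120',
# ...                                  '2021-07-02 10:00:00.400']),
# ...     'Sensor': ['s1', 's2', 's1'],
# ...     'Value': [1.0, 2.0, 3.0]})
# >>> mapper = ColumnTSMapper(cols=['Timestamp', 'Sensor', 'Value'],
# ...                         timedelta=pd.Timedelta(250, 'ms'))
# >>> wide = mapper.fit_transform(raw)
# `wide` has one row per 250 ms bin and one column per sensor (plus
# 'Timestamp'); values falling into the same bin are averaged and gaps are
# forward-/backward-filled.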
class DatetimeTransformer(BaseEstimator, TransformerMixin):
"""Transforms a list of columns to datetime.
Example:
>>> data = pd.DataFrame({'dt': ['2021-07-02 16:30:00']})
>>> data = DatetimeTransformer(columns=['dt']).transform(data)
>>> data.dtypes
dt datetime64[ns]
"""
def __init__(self, *, columns: List[str], dt_format: str = None):
"""Creates DatetimeTransformer.
Parses a list of columns to pd.Timestamp.
Args:
columns (list): List of column names.
dt_format (str): Optional datetime format string passed to pd.to_datetime.
"""
super().__init__()
self._columns = columns
self._format = dt_format
def fit(self, X, y=None):
return self
def transform(self, X):
"""Parses `columns` to datetime.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if columns are missing in `X`.
Returns:
pd.DataFrame: Returns the dataframe with datetime columns.
"""
X = X.copy()
# check if columns in dataframe
if len(diff := set(self._columns) - set(X.columns)):
raise ValueError(
f'Columns {diff} not found in DataFrame with columns'
f'{X.columns.to_list()}.')
# parse to pd.Timestamp
X[self._columns] = X[self._columns].apply(
lambda x: pd.to_datetime(x, format=self._format), axis=0)
# column wise
return X
class NumericTransformer(BaseEstimator, TransformerMixin):
"""Transforms a list of columns to numeric datatype.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': ['1']})
>>> data.dtypes
a int64
b object
>>> data = NumericTransformer().transform(data)
>>> data.dtypes
a int64
b int64
"""
def __init__(self, *, columns: Optional[List[str]] = None):
"""Creates NumericTransformer.
Parses a list of columns to numeric datatype. If `columns` is None,
all columns are attempted to be parsed.
Args:
columns (list, optional): List of column names. Defaults to None,
in which case all columns are parsed.
"""
super().__init__()
self._columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
"""Parses `columns` to numeric.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if columns are missing in `X`.
Returns:
pd.DataFrame: Returns the dataframe with datetime columns.
"""
X = X.copy()
# transform all columns
if self._columns is None:
columns = X.columns.to_list()
else:
columns = self._columns
if len((diff := list(set(columns) - set(cols := X.columns)))):
raise ValueError(f'Columns found: {cols.to_list()}. '
f'Columns missing: {diff}.')
# parse to numeric
# column wise
X[columns] = X[columns].apply(pd.to_numeric, axis=0)
return X
class TimeframeExtractor(BaseEstimator, TransformerMixin):
"""Drops sampes that are not between a given start and end time.
Limits are inclusive.
Example:
>>> data = pd.DataFrame(
{'dates': [datetime.datetime(2021, 7, 2, 9, 50, 0),
datetime.datetime(2021, 7, 2, 11, 0, 0),
datetime.datetime(2021, 7, 2, 12, 10, 0)],
'values': [0, 1, 2]})
>>> TimeframeExtractor(time_column='dates',
start_time= datetime.time(10, 0, 0),
end_time=datetime.time(12, 0, 0)
).transform(data)
pd.DataFrame({'dates': datetime.datetime(2021, 7, 2, 11, 0, 0),
'values': [1]})
"""
def __init__(self,
*,
time_column: str,
start_time: datetime.time,
end_time: datetime.time,
invert: bool = False,
verbose: bool = False):
"""Creates TimeframeExtractor.
Drops samples that are not in between `start_time` and `end_time` in
`time_column`.
Args:
time_column (str): Column name of the timestamp column.
start_time (datetime.time): Start time.
end_time (datetime.time): End time.
invert(bool): Whether to invert the range.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._start = start_time
self._end = end_time
self._column = time_column
self._negate = invert
self._verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops rows from the dataframe if they are not in between
`start_time` and `end_time`. Limits are inclusive. Reindexes the
dataframe.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the new dataframe.
"""
X = X.copy()
rows_before = X.shape[0]
dates = pd.to_datetime(X[self._column])
if self._negate:
X = X.loc[~((dates.dt.time >= self._start) &
(dates.dt.time <= self._end)), :]
else:
X = X.loc[(dates.dt.time >= self._start) &
(dates.dt.time <= self._end), :]
X.index = pd.RangeIndex(0, X.shape[0])
if self._verbose:
print(f'TimeframeExtractor: kept {X.shape[0]} of {rows_before} rows.')
return X
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from pandas import DataFrame, Series
from pandas.compat import range, lrange, iteritems
#from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
import pandas.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
class PandasSQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and fallback cases.
"""
def drop_table(self, table_name):
self._get_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [
('2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705, 0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
('first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._get_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_sql('SELECT * FROM test_frame_roundtrip')
result.set_index('pandas_index', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.pandasSQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
"""
Module for static data retrieval. These functions were performed once during the initial project creation. Resulting
data is now provided in bulk at the url above.
"""
import datetime
import json
from math import sin, cos, sqrt, atan2, radians
import re
import requests
import pandas as pd
from riverrunner import settings
from riverrunner.context import StationRiverDistance
from riverrunner.repository import Repository
def scrape_rivers_urls():
"""scrape river run data from Professor Paddle
generates URLs from the array of strings below. Each element represents a unique river. Each page is
requested with the entire HTML contents being saved to disk. The parsed river data is saved to 'data/rivers.csv'
"""
# copied from jquery selection in chrome dev tools on main prof paddle run table
river_links = pd.read_csv('riverrunner/data/static_river_urls.csv').columns.values
river_ids = [r[r.find("=")+1:] for r in river_links]
url = "http://www.professorpaddle.com/rivers/riverdetails.asp?riverid="
for id in river_ids:
r = requests.get(url + id)
if r.status_code == 200:
with open("river_%s.html" % id, 'w+') as f:
f.write(str(r.content))
rivers = []
for rid in river_ids:
with open('data/river_%s.html' % rid) as f:
river = f.readlines()
r = river[0]
row = {}
# title and river name
r = r[r.find('<font size="+2">'):]
run_name = r[r.find(">") + 1:r.find('<a')]
run_name = re.sub(r'<[^>]*>| ', ' ', run_name)
river_name = run_name[:run_name.find(' ')]
run_name = run_name[len(river_name):]
run_name = re.sub(r''', "'", run_name)
run_name = re.sub(r'—', "", run_name).strip()
row['run_name'] = re.sub(r'( )+', ' ', run_name)
row['river_name'] = river_name
# chunk off the class
r = r[r.find('Class'):]
rating = r[6:r.find('</strong>')]
row['class_rating'] = rating
# river length
r = r[r.find('<strong>')+8:]
length = r[:r.find("<")]
row['river_length'] = length
# zip code
r = r[r.find('Zip Code'):]
r = r[r.find('path')+6:]
row['zip'] = r[:r.find("<")]
# put in long
r = r[r.find("Put In Longitude"):]
r = r[r.find('path')+6:]
row['put_in_long'] = r[:r.find("<")]
# put in lat
r = r[r.find("Put In Latitude"):]
r = r[r.find('path')+6:]
row['put_in_lat'] = r[:r.find("<")]
# take out long
r = r[r.find("Take Out Longitude"):]
r = r[r.find('path')+6:]
row['take_out_long'] = r[:r.find("<")]
# take out lat
r = r[r.find("Take Out Latitude"):]
r = r[r.find('path')+6:]
row['take_out_lat'] = r[:r.find("<")]
# county
r = r[r.find("County"):]
r = r[r.find('path')+6:]
row['county'] = r[:r.find("<")]
# min level
r = r[r.find("Minimum Recomended Level"):]
r = r[r.find(" ")+6:]
row['min_level'] = r[:r.find("&")]
# min level units
r = r[r.find(';')+1:]
row['min_level_units'] = r[:r.find('&')]
# Maximum Recomended Level
r = r[r.find("Maximum Recomended Level"):]
r = r[r.find(" ")+6:]
row['max_level'] = r[:r.find("&")]
# max level units
r = r[r.find(';')+1:]
row['max_level_units'] = r[:r.find('&')]
row['id'] = rid
row['url'] = url + rid
rivers.append(row)
pd.DataFrame(rivers).to_csv('data/rivers.csv')
def parse_location_components(components, lat, lon):
"""parses location data from a Goggle address component list"""
location = {'latitude': lat, 'longitude': lon}
for component in components:
component_type = component['types']
if 'route' in component_type:
location['address'] = component['long_name']
elif 'locality' in component_type:
location['city'] = component['long_name']
elif 'administrative_area_level_2' in component_type:
location['route'] = re.sub(r'County', '', component['long_name'])
elif 'administrative_area_level_1' in component_type:
location['state'] = component['short_name']
elif 'postal_code' in component_type:
location['zip'] = component['long_name']
print(location)
return location
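# Illustrative input for parse_location_components: a (trimmed) Google
# geocoding "address_components" list. The concrete values below are made up
# for the example; only the 'long_name'/'short_name'/'types' layout matters.
#
# components = [
#     {'long_name': 'Main Street', 'short_name': 'Main St', 'types': ['route']},
#     {'long_name': 'Seattle', 'short_name': 'Seattle', 'types': ['locality', 'political']},
#     {'long_name': 'King County', 'short_name': 'King County',
#      'types': ['administrative_area_level_2', 'political']},
#     {'long_name': 'Washington', 'short_name': 'WA',
#      'types': ['administrative_area_level_1', 'political']},
#     {'long_name': '98101', 'short_name': '98101', 'types': ['postal_code']}]
# parse_location_components(components, 47.6, -122.3)
# -> {'latitude': 47.6, 'longitude': -122.3, 'address': 'Main Street',
#     'city': 'Seattle', 'route': 'King ', 'state': 'WA', 'zip': '98101'}
# (the 'route' key holds the county name with the literal word 'County' removed)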
def parse_addresses_from_rivers():
"""parses river geolocation data and retrieves associated address information from Google geolocation services"""
df = pd.read_csv('data/rivers.csv').fillna('null')
addresses = []
# put in addresses
for name, group in df.groupby(['put_in_lat', 'put_in_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.append(parse_location_components(components, name[0], name[1]))
# take out addresses
for name, group in df.groupby(['take_out_lat', 'take_out_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
if r.status_code == 200 and len(r.content) > 10:
components = json.loads(r.content)['results'][0]['address_components']
addresses.append(parse_location_components(components, name[0], name[1]))
pd.DataFrame(addresses).to_csv('data/addresses_takeout.csv', index=False)
def scrape_snowfall():
"""scrapes daily snowfall data from NOAA"""
base_url = 'https://www.ncdc.noaa.gov/snow-and-ice/daily-snow/WA-snow-depth-'
snowfall = []
for year in [2016, 2017, 2018]:
for month in range(1, 13):
for day in range(1, 32):
try:
date = '%s%02d%02d' % (year, month, day)
r = requests.get(base_url + date + '.json')
if r.status_code == 200 and len(r.content) > 0:
snf = json.loads(r.content)
for row in snf['rows']:
lat = row['c'][0]['v']
lon = row['c'][1]['v']
location_name = row['c'][2]['v'].strip().lower()
depth = row['c'][3]['v']
this_row = (datetime.datetime.strptime(str(date), '%Y%m%d').date(), lat, lon, location_name, depth)
snowfall.append(this_row)
print(this_row)
except Exception as e:
print([str(a) for a in e.args])
df = pd.DataFrame(snowfall)
df.columns = ['date', 'lat', 'lon', 'location_name', 'depth']
df.to_csv('data/snowfall.csv', index=None)
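# Note: the parser above assumes each row of the NOAA response follows a
# Google-Visualization-style table layout, roughly
#   {'rows': [{'c': [{'v': lat}, {'v': lon}, {'v': 'Name (station_id)'}, {'v': depth}]}, ...]}
# Days without data return an empty body and are skipped by the
# status-code/length check.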
def parse_addresses_and_stations_from_snowfall():
"""iterate through snowfall geolocation data for associated station addresses"""
df = pd.read_csv('data/snowfall.csv')
addresses, stations = [], []
for name, group in df.groupby(['lat', 'lon']):
if name[0] == 0 or name[1] == 0:
continue
# parse address information
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.append(parse_location_components(components, name[0], name[1]))
# parse station information
station = dict()
name = pd.unique(group.location_name)[0]
station['station_id'] = name[name.find('(') + 1:-1].strip().lower()
parts = name[:name.find(',')].split(' ')
for i, s in enumerate(parts):
if s.isdigit() or s not in \
['N', 'NE', 'NNE', 'ENE', 'E', 'ESE', 'SSE',
'SE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']:
parts[i] = s.title()
station['name'] = ' '.join(parts)
station['source'] = 'NOAA'
station['latitude'] = pd.unique(group.lat)[0]
station['longitude'] = pd.unique(group.lon)[0]
stations.append(station)
pd.DataFrame(addresses).to_csv('data/addresses_snowfall.csv', index=False)
# output path assumed, mirroring the addresses export above
pd.DataFrame(stations).to_csv('data/stations_snowfall.csv', index=False)
import pickle
import os
import numpy as np
import argparse
from matplotlib import pyplot as plt
import matplotlib
import glob
import pandas as pd
from tqdm import tqdm
parser = argparse.ArgumentParser(description='save annotations')
parser.add_argument('--vis', action='store_true', default=False,
help='whether to visualize the distribution')
parser.add_argument('--annot_dir', type=str, default='/home/user1/dataset/Aff-Wild/annotations',
help='annotation dir')
parser.add_argument('--data_dir', type=str, default='/home/user1/dataset/Aff-Wild/cropped_aligned')
args = parser.parse_args()
def read_AU(txt_file):
with open(txt_file, 'r') as f:
lines = f.readlines()
lines = lines[1:] # skip first line
lines = [x.strip() for x in lines]
lines = [x.split(',') for x in lines]
lines = [[float(y) for y in x] for x in lines]
return np.array(lines)
def read_Expr(txt_file):
with open(txt_file, 'r') as f:
lines = f.readlines()
lines = lines[1:] # skip first line
lines = [x.strip() for x in lines]
lines = [int(x) for x in lines]
return np.array(lines)
def read_VA(txt_file):
with open(txt_file, 'r') as f:
lines = f.readlines()
lines = lines[1:] # skip first line
lines = [x.strip() for x in lines]
lines = [x.split(',') for x in lines]
lines = [[float(y) for y in x] for x in lines]
return np.array(lines)
def plot_pie(AU_list, pos_freq, neg_freq):
ploting_labels = [x + '+ {0:.2f}'.format(y) for x, y in zip(AU_list, pos_freq)] + [x + '- {0:.2f}'.format(y) for
x, y in zip(AU_list, neg_freq)]
cmap = matplotlib.cm.get_cmap('coolwarm')
colors = [cmap(x) for x in pos_freq] + [cmap(x) for x in neg_freq]
fracs = np.ones(len(AU_list) * 2)
plt.pie(fracs, labels=ploting_labels, autopct=None, shadow=False, colors=colors, startangle=78.75)
plt.title("AUs distribution")
plt.show()
def frames_to_label(label_array, frames, discard_value):
assert len(label_array) >= len(frames) # some labels need to be discarded
frames_ids = [int(frame.split('/')[-1].split('.')[0]) - 1 for frame in frames] # frame_id start from 0
N = label_array.shape[0]
label_array = label_array.reshape((N, -1))
to_drop = (label_array == discard_value).sum(-1)
drop_ids = [i for i in range(len(to_drop)) if to_drop[i]]
frames_ids = [i for i in frames_ids if i not in drop_ids]
indexes = [True if i in frames_ids else False for i in range(len(label_array))]
label_array = label_array[indexes]
assert len(label_array) == len(frames_ids)
prefix = '/'.join(frames[0].split('/')[:-1])
return_frames = [prefix + '/{0:05d}.jpg'.format(id + 1) for id in frames_ids]
return label_array, return_frames, frames_ids
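# Worked example for frames_to_label (hypothetical file names, for
# illustration only):
#   labels = np.array([3, -1, 5])
#   frames = ['vid/00001.jpg', 'vid/00002.jpg', 'vid/00003.jpg']
#   frames_to_label(labels, frames, discard_value=-1)
#   -> (array([[3], [5]]), ['vid/00001.jpg', 'vid/00003.jpg'], [0, 2])
# i.e. frame 00002.jpg is dropped because its label equals the discard value,
# and frames_ids are the zero-based frame numbers that were kept.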
def main():
annot_dir = args.annot_dir
tasks = [x for x in os.listdir(annot_dir)]
data_file = {}
for task in tasks:
if task == 'AU_Set':
AU_list = ['AU1', 'AU2', 'AU4', 'AU6', 'AU7', 'AU10', 'AU12', 'AU15', 'AU23', 'AU24', 'AU25', 'AU26']
data_file[task] = {}
for mode in ['Train_Set', 'Validation_Set']:
txt_files = glob.glob(os.path.join(annot_dir, task, mode, '*.txt'))
data_file[task][mode] = {}
for txt_file in tqdm(txt_files):
name = os.path.basename(txt_file).split('.')[0]
au_array = read_AU(txt_file)
frames_paths = sorted(glob.glob(os.path.join(args.data_dir, name, '*.jpg')))
au_array, frames_paths, frames_ids = frames_to_label(au_array, frames_paths, discard_value=-1)
data_dict = dict([(AU_list[i], au_array[:, i]) for i in range(len(AU_list))])
data_dict.update({'path': frames_paths, 'frames_ids': frames_ids})
data_file[task][mode][name] = pd.DataFrame.from_dict(data_dict)
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from model.datasets import SampleDataset
def collect_data(data: dict, csv_path, is_tp):
# recording_id,species_id,songtype_id,t_min,f_min,t_max,f_max
reader = csv.DictReader(open(csv_path, "r"))
fieldnames = reader.fieldnames
for row in reader:
species_id = row['species_id']
songtype_id = row['songtype_id']
t_min = float(row['t_min'])
t_max = float(row['t_max'])
f_min = float(row['f_min'])
f_max = float(row['f_max'])
duration = t_max - t_min
key = f'{species_id}|{songtype_id}|{is_tp}'
if key not in data:
data[key] = {'count': 0, 'duration': [], 'f_min': [], 'f_max': []}
data[key]['count'] = data[key]['count'] + 1
data[key]['duration'].append(duration)
data[key]['f_min'].append(f_min)
data[key]['f_max'].append(f_max)
def collect_mean_values(data):
def _get_stat(data_item):
return np.min(data_item), np.max(data_item), np.mean(data_item)
for key, value in data.items():
value['duration'] = _get_stat(value['duration'])
value['f_min'] = _get_stat(value['f_min'])
value['f_max'] = _get_stat(value['f_max'])
def analyze_data(path, is_tp):
data = pd.read_csv(path)
data[SampleDataset.k_key] = data['species_id'].astype(str) + '|' + data['songtype_id'].astype(str)
data[SampleDataset.k_duration] = data['t_max'] - data['t_min']
# data = data.sort_values([SampleDataset.k_species_id, SampleDataset.k_songtype_id], ascending=[True, True])
gr_min = data.groupby(SampleDataset.k_key).min(numeric_only=True)
gr_mean = data.groupby(SampleDataset.k_key).mean(numeric_only=True)
gr_max = data.groupby(SampleDataset.k_key).max(numeric_only=True)
gr_min = gr_min.sort_values([SampleDataset.k_species_id, SampleDataset.k_songtype_id]) # type:pd.DataFrame
gr_mean = gr_mean.sort_values([SampleDataset.k_species_id, SampleDataset.k_songtype_id]) # type:pd.DataFrame
gr_max = gr_max.sort_values([SampleDataset.k_species_id, SampleDataset.k_songtype_id]) # type:pd.DataFrame
gr_orig = gr_min[[SampleDataset.k_species_id, SampleDataset.k_songtype_id]]
gr_orig['tp'] = is_tp
columns = [SampleDataset.k_f_min, SampleDataset.k_f_max, SampleDataset.k_duration]
gr_min = gr_min[columns].add_suffix('_min')
gr_mean = gr_mean[columns].add_suffix('_mean')
gr_max = gr_max[columns].add_suffix('_max')
new_data = pd.concat([gr_orig, gr_min, gr_mean, gr_max], axis=1)
columns = [
SampleDataset.k_species_id, SampleDataset.k_songtype_id, 'tp',
SampleDataset.k_f_min + '_min', SampleDataset.k_f_min + '_mean', SampleDataset.k_f_min + '_max',
SampleDataset.k_f_max + '_min', SampleDataset.k_f_max + '_mean', SampleDataset.k_f_max + '_max',
SampleDataset.k_duration + '_min', SampleDataset.k_duration + '_mean', SampleDataset.k_duration + '_max',
]
new_data = new_data[columns]
return new_data
def main():
statistics_data = {}
statistics_data_tp = {}
statistics_data_fp = {}
# tips = sns.load_dataset("tips")
# sns.displot(tips, x="size")
# plt.show()
# collect_data(statistics_data, r'd:\Projects\Kaggle\rfcx-species-audio-detection_data\train_tp.csv', 1)
# collect_data(statistics_data, r'd:\Projects\Kaggle\rfcx-species-audio-detection_data\train_tp.csv', 1)
# collect_data(statistics_data_tp, r'd:\Projects\Kaggle\rfcx-species-audio-detection_data\train_tp.csv', 1)
# collect_data(statistics_data_fp, r'd:\Projects\Kaggle\rfcx-species-audio-detection_data\train_fp.csv', 0)
data_tp = analyze_data(r'../data/train_tp.csv', 1)
data_fp = analyze_data(r'../data/train_fp.csv', 0)
data_all = pd.concat([data_tp, data_fp])
import numpy as np
import pandas as pd
import scanpy as sc
import scanpy.external as sce
def create_cluster_annotation_overview(
adata,
n_levels,
cluster_label,
min_fraction_for_dominancy=0.80,
min_fraction_annotated=0.5,
compartment_of_interest=None,
):
"""Function to calculate for each cluster, for each annotation level, if it is
dominated by a cell type.
Args:
adata - scanpy AnnData object
n_levels - number of annotation levels (named "ann_level_[number]" in adata.obs)
cluster_label - column name of cluster column in adata.obs
min_fraction_for_dominancy - minimum fraction of annotated cells to belong to
one cell type, to be called "dominant". Should be
higher than 0.5.
min_fraction_annotated - minimum fraction of cells in a cluster that need to be
annotated, before "dominancy" analysis is possible
compartment_of_interest - ann_level_1 compartment to which to limit the cluster
analysis. Only clusters that belong to multiple
compartments or this specific compartment are included
in the output df.
Returns:
cluster df - pandas dataframe with for each cluster information on what is the
dominant cluster (if there is one), and the fraction of annotated
cells belonging to the dominant cluster
"""
cluster_df = pd.DataFrame(
index=adata.obs[cluster_label].cat.categories,
columns=zip(
["ann{}_dom_type".format(level) for level in range(1, n_levels + 1)],
["ann{}_dom_fract".format(level) for level in range(1, n_levels + 1)],
),
)
for level in range(1, n_levels + 1):
level_name = "ann_level_" + str(level)
clust_cell_types = adata.obs.groupby([cluster_label, level_name]).agg(
{level_name: "count"}
)
# count fraction of cells that is annotated at this level:
clust_cell_types["annotated"] = [
"no" if celltype[:2] in ["1_", "2_", "3_", "4_"] else "yes"
for celltype in clust_cell_types.index.get_level_values(1)
]
number_annotated = clust_cell_types.groupby([cluster_label, "annotated"]).agg(
{level_name: "sum"}
)
fraction_annotated = number_annotated.groupby(level=0).apply(
lambda x: x / float(x.sum())
)
# keep only cells that are annotated at this level:
rows_to_keep = [
rownumber
for rownumber, rowname in enumerate(clust_cell_types.index.get_level_values(1))
if not rowname[:2] in ["1_", "2_", "3_", "4_"]
]
clust_cell_types = clust_cell_types.iloc[rows_to_keep, :]
# convert to proportions
clust_cell_types = clust_cell_types.groupby(level=0)[level_name].apply(
lambda x: x / float(x.sum())
)
# add "dominant" annotation:
dominant_types = clust_cell_types.index[
clust_cell_types > min_fraction_for_dominancy
]
dominant_fractions = clust_cell_types[clust_cell_types > min_fraction_for_dominancy]
# copy dominant types to cluster_df:
cluster_df.loc[
dominant_types.get_level_values(0), "ann{}_dom_type".format(level)
] = dominant_types.get_level_values(1)
# copy dominance fractions to cluster_df
cluster_df.loc[
dominant_fractions.index.get_level_values(0), "ann{}_dom_fract".format(level)
] = dominant_fractions.values
# set underannotated entries to "underann"
# first, make sure columns are not categorical (they would not accept new cat)
for cat in ["ann{}_dom_type".format(level), "ann{}_dom_fract".format(level)]:
cluster_df[cat] = cluster_df[cat].tolist()
idx = pd.IndexSlice
underannotated_boolean = (
fraction_annotated.loc[idx[:, "yes"], :] < min_fraction_annotated
)
cluster_df.loc[
underannotated_boolean[level_name].values,
["ann{}_dom_type".format(level), "ann{}_dom_fract".format(level)],
] = "underann"
    if compartment_of_interest is not None:
# subset epithelial and split clusters
cluster_df = cluster_df.loc[
[
main_type == compartment_of_interest or split_cluster
for main_type, split_cluster in zip(
cluster_df.ann1_dom_type, cluster_df.ann1_dom_type.isnull()
)
],
:,
]
return cluster_df
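# A minimal usage sketch (illustration only, not from the original module). The
# cluster key "leiden_1", the number of annotation levels and the thresholds are
# assumptions; any categorical adata.obs cluster column plus ann_level_1..ann_level_3
# columns would work the same way.
def _example_annotation_overview(adata):
    overview = create_cluster_annotation_overview(
        adata,
        n_levels=3,
        cluster_label="leiden_1",
        min_fraction_for_dominancy=0.8,
        min_fraction_annotated=0.5,
    )
    # clusters without a dominant type at every level are candidates for subclustering
    return overview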
def add_nested_clustering(
adata,
cluster_df,
cluster_label_previous,
cluster_label_new,
cluster_res=0.2,
min_cluster_size=100,
verbose=True,
):
"""Function that goes through one round of clustering of already existing
clusters, based on the input cluster df. All clusters that don't have a
dominant cluster yet at all levels in the df (as long as they are
sufficiently annotated) will be reclustered individually.
Returns adata with new clustering (under adata.obs[cluster_label_new].
"""
# copy original clustering
adata.obs[cluster_label_new] = adata.obs[cluster_label_previous].tolist()
for cluster in cluster_df.index:
if verbose:
print("Cluster:", cluster)
dom_types = cluster_df.loc[cluster, :]
if dom_types.isnull().any():
subadata = adata[adata.obs[cluster_label_previous] == cluster, :].copy()
if subadata.shape[0] < min_cluster_size:
if verbose:
print("cluster size smaller than", min_cluster_size, "\n")
continue
if verbose:
print("reclustering...\n")
sc.tl.pca(subadata)
sc.tl.leiden(subadata, resolution=cluster_res, key_added=cluster_label_new)
subadata.obs[cluster_label_new] = [
"{}.{}".format(cluster, new_cluster)
for new_cluster in subadata.obs[cluster_label_new]
]
adata.obs.loc[subadata.obs.index, cluster_label_new] = subadata.obs[
cluster_label_new
]
else:
if verbose:
print("clustered to full resolution!\n")
# order categories "numerically" (so not 1, 10, 11 but 1, 2, 3... 10, 11):
cluster_numbers = list(sorted(set(adata.obs[cluster_label_new])))
prefix_cluster = [float(x.split(".")[0]) for x in cluster_numbers]
cluster_numbers_ordered = [
cluster_numbers[idx] for idx in np.argsort(prefix_cluster)
]
adata.obs[cluster_label_new] = pd.Categorical(
adata.obs[cluster_label_new], categories=cluster_numbers_ordered
)
return adata
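# Illustrative sketch of one nested-clustering round (names are assumptions, not
# from the original module): build the dominance overview, then recluster the
# clusters that are not yet dominated at every annotation level.
def _example_nested_round(adata):
    overview = create_cluster_annotation_overview(
        adata, n_levels=3, cluster_label="leiden_1"
    )
    return add_nested_clustering(
        adata,
        overview,
        cluster_label_previous="leiden_1",
        cluster_label_new="leiden_2",
        cluster_res=0.2,
    )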
def add_nested_clustering_blind(
adata,
cluster_label_previous,
cluster_label_new,
use_rep,
cluster_alg="leiden",
cluster_res=0.2,
cluster_k=30,
min_cluster_size=50,
redo_pca=True,
verbose=True,
):
"""Function that goes through one round of clustering of already existing
clusters, based on the input cluster df. All clusters will be reclustered
individually. ("blind" because we don't take into account annotation
purity of clusters.)
Args:
adata - anndata object to be clustered
cluster_label_previous - parent cluster label
cluster_label_new - label for new clustering
use_rep - name of .obsm object to be used for neighbor graph
cluster_alg - <"leiden","phenograph">
cluster_res - only applicable when using "leiden" as cluster_alg
cluster_k - only applicable when using "phenograph" as cluster_alg.
min_cluster_size - only applicable when using "phenograph" as cluster_alg
Make sure that cluster_k < min_cluster_size
redo_pca - boolean. whether to re-calculate PCA for subclusters
verbose - boolean
Returns adata with new clustering (under adata.obs[cluster_label_new].
"""
# copy original clustering
clusters_previous = adata.obs[cluster_label_previous].tolist()
adata.obs[cluster_label_new] = clusters_previous
if not redo_pca:
print("Not re-doing pca before nested clustering iterations!")
for cluster in sorted(set(clusters_previous)):
if verbose:
print("Cluster:", cluster)
subadata = adata[adata.obs[cluster_label_previous] == cluster, :].copy()
if subadata.shape[0] < min_cluster_size:
if verbose:
print("cluster size smaller than", min_cluster_size, "\n")
continue
if verbose:
print("reclustering...\n")
if redo_pca:
if verbose:
print("running pca...")
sc.tl.pca(subadata)
if cluster_alg == "leiden":
if verbose:
print("calculating 30 nearest neighbors")
print("using rep:", use_rep)
sc.pp.neighbors(subadata, n_neighbors=30, use_rep=use_rep)
if verbose:
print("clustering")
sc.tl.leiden(subadata, resolution=cluster_res, key_added=cluster_label_new)
elif cluster_alg == "phenograph":
subadata.obs[cluster_label_new] = pd.Categorical(
sce.tl.phenograph(subadata.obsm[use_rep], k=cluster_k)[0]
)
else:
raise ValueError("Your cluster_alg argument is incorrect.")
subadata.obs[cluster_label_new] = [
"{}.{}".format(cluster, new_cluster)
for new_cluster in subadata.obs[cluster_label_new]
]
adata.obs.loc[subadata.obs.index, cluster_label_new] = subadata.obs[
cluster_label_new
]
# order categories "numerically" (so not 1, 10, 11 but 1, 2, 3... 10, 11):
# convert all cluster names to strings, instead of a mix of strings and ints:
adata.obs[cluster_label_new] = [
str(clust) for clust in adata.obs[cluster_label_new]
]
cluster_numbers = list(sorted(set(adata.obs[cluster_label_new])))
prefix_cluster = [float(x.split(".")[0]) for x in cluster_numbers]
cluster_numbers_ordered = [
cluster_numbers[idx] for idx in np.argsort(prefix_cluster)
]
adata.obs[cluster_label_new] = pd.Categorical(
adata.obs[cluster_label_new], categories=cluster_numbers_ordered
)
return adata
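# Hedged example of the "blind" variant (all names are assumptions): every cluster
# stored in "leiden_1" is reclustered on a latent representation assumed to be
# available in adata.obsm["X_scVI"].
def _example_blind_round(adata):
    return add_nested_clustering_blind(
        adata,
        cluster_label_previous="leiden_1",
        cluster_label_new="leiden_2",
        use_rep="X_scVI",
        cluster_alg="leiden",
        cluster_res=0.2,
    )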
def get_cluster_markers(adata, cluster_label, marker_ref, ngenes=100, verbose=True):
"""
Calculates markers for every cluster, using either all other cells or
the parent cluster as a reference (i.e. for cluster 00.00.01, it
uses all clusters starting with 00.00 as reference. For cluster
00, it uses all cells as reference).
sc.tl.rank_genes is used for marker gene calculation.
Arguments:
adata - AnnData object
cluster_label - string
label in adata.obs that contains nested-cluster names
marker_ref - either "all" or "sisters". Which clusters to compare with.
ngenes - number of marker genes to get per cluster
Returns:
cluster_markers - pd.DataFrame
dataframe with, for each cluster, 100 highest scoring genes,
plus matching logfc and adj pvalue
"""
# input check:
if marker_ref == "all":
print("Doing one versus all differential expression analysis.")
elif marker_ref == "sisters":
print("Doing one versus sisters differential expression analysis.")
else:
raise ValueError("marker_ref argument should be set to either 'all' or 'sisters'.")
# convert clusters to strings:
adata.obs[cluster_label] = [str(cl) for cl in adata.obs[cluster_label]]
# store cluster set
clusters = sorted(set(adata.obs[cluster_label]))
colnames_nested = [
[clust + "_gene", clust + "_logfc", clust + "_pval_adj"] for clust in clusters
]
colnames = [item for sublist in colnames_nested for item in sublist]
    cluster_markers = pd.DataFrame(index=range(ngenes), columns=colnames)
parents_tested = list()
for clust in clusters:
clust_depth = len(clust.split("."))
if clust_depth == 1:
parent = "all"
if parent not in parents_tested:
if verbose:
print("ranking genes for parent group", parent)
parents_tested.append(parent)
sc.tl.rank_genes_groups(adata, groupby=cluster_label, n_genes=ngenes)
# store results for all clusters from this parent
# i.e. all clusters of depth 1
for d1_cluster in [
clust for clust in clusters if len(clust.split(".")) == 1
]:
# create a subdf that will allow us to sort genes per cluster
submarker_df = pd.DataFrame(
index=range(ngenes),
columns=[
d1_cluster + "_gene",
d1_cluster + "_logfc",
d1_cluster + "_pval_adj",
],
)
submarker_df[d1_cluster + "_gene"] = adata.uns["rank_genes_groups"][
"names"
][d1_cluster]
submarker_df[d1_cluster + "_logfc"] = adata.uns[
"rank_genes_groups"
]["logfoldchanges"][d1_cluster]
submarker_df[d1_cluster + "_pval_adj"] = adata.uns[
"rank_genes_groups"
]["pvals_adj"][d1_cluster]
# sort values:
submarker_df.sort_values(
by=[d1_cluster + "_pval_adj", d1_cluster + "_logfc"],
ascending=[True, False],
inplace=True,
)
submarker_df = submarker_df.reset_index().drop(columns="index")
# and add to big dataframe
cluster_markers.loc[
submarker_df.index, submarker_df.columns
] = submarker_df.values
else:
parent = ".".join(clust.split(".")[: clust_depth - 1])
if parent not in parents_tested:
# depending on reference choice, use whole adata as reference
# or only the parent cluster.
if marker_ref == "all":
subadata = adata
elif marker_ref == "sisters":
subadata = adata[[cl.startswith(parent) for cl in adata.obs[cluster_label]],:].copy()
if verbose:
print("ranking genes for parent group", parent)
parents_tested.append(parent)
siblings = [c for c in clusters if c.startswith(parent)]
if len(siblings) < 2 and marker_ref == "sisters":
print("Cluster {} has only one subcluster. Skipping DEA for this parent.".format(parent))
else:
sc.tl.rank_genes_groups(subadata, groupby=cluster_label, groups=siblings, n_genes=ngenes)
for same_depth_sibling in [
                        sib for sib in siblings if len(sib.split(".")) == clust_depth
]:
# create a subdf that will allow us to sort genes per cluster
submarker_df = pd.DataFrame(
index=range(ngenes),
columns=[
same_depth_sibling + "_gene",
same_depth_sibling + "_logfc",
same_depth_sibling + "_pval_adj",
],
)
submarker_df[same_depth_sibling + "_gene"] = subadata.uns[
"rank_genes_groups"
]["names"][same_depth_sibling]
submarker_df[same_depth_sibling + "_logfc"] = subadata.uns[
"rank_genes_groups"
]["logfoldchanges"][same_depth_sibling]
submarker_df[same_depth_sibling + "_pval_adj"] = subadata.uns[
"rank_genes_groups"
]["pvals_adj"][same_depth_sibling]
# sort values:
submarker_df.sort_values(
by=[
same_depth_sibling + "_pval_adj",
same_depth_sibling + "_logfc",
],
ascending=[True, False],
inplace=True,
)
submarker_df = submarker_df.reset_index().drop(columns="index")
# add to big dataframe
cluster_markers.loc[
submarker_df.index, submarker_df.columns
] = submarker_df.values
return cluster_markers
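# Usage sketch (illustration only, the cluster column name is an assumption):
# markers of each nested cluster against its sister clusters.
def _example_cluster_markers(adata):
    markers = get_cluster_markers(
        adata, cluster_label="leiden_2", marker_ref="sisters", ngenes=50
    )
    # columns come in triples: <cluster>_gene, <cluster>_logfc, <cluster>_pval_adj
    return markers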
def create_cluster_mapping_overview(
adata,
n_levels,
cluster_label_to_decompose,
cluster_label_to_count_prefix,
min_fraction_for_dominancy=0.5,
index_name=None,
):
"""Function to calculate for a new clustering, which clusters from an old
clustering are the dominant ones in your new clustering (or vice versa).
Args:
adata - scanpy AnnData object
n_levels - number of annotation levels (named "ann_level_[number]" in adata.obs)
cluster_label_to_decompose - column name of cluster column in adata.obs
for which we want to know of what clusters it consists
cluster_label_to_count_prefix - column name (excluding level number) of
clusters by which we want to define our cluster-to-decompose
min_fraction_for_dominancy - minimum fraction of annotated cells to belong to
one cell type, to be called "dominant". Should be
higher than 0.5.
index_name - name to give to index column
Returns:
cluster df - pandas dataframe with for each cluster information on what is the
dominant cluster (if there is one), and the fraction of annotated
cells belonging to the dominant cluster
"""
# set up dataframe with one row per new cluster
cluster_df = pd.DataFrame(
index=adata.obs[cluster_label_to_decompose].cat.categories,
columns=zip(
[
f"{cluster_label_to_count_prefix}{level}_dom_type"
for level in range(1, n_levels + 1)
],
[
f"{cluster_label_to_count_prefix}{level}_dom_fract"
for level in range(1, n_levels + 1)
],
),
)
# loop through cluster-to-count levels
for level in range(1, n_levels + 1):
cluster_to_count_level_name = f"{cluster_label_to_count_prefix}{level}"
clust_cell_types = adata.obs.groupby(
[cluster_label_to_decompose, cluster_to_count_level_name]
).agg({cluster_to_count_level_name: "count"})
# convert to proportions
clust_cell_types = clust_cell_types.groupby(level=0)[
cluster_to_count_level_name
].apply(lambda x: x / float(x.sum()))
# add "dominant" annotation:
dominant_types = clust_cell_types.index[
clust_cell_types > min_fraction_for_dominancy
]
dominant_fractions = clust_cell_types[
clust_cell_types > min_fraction_for_dominancy
]
# copy dominant types to cluster_df:
cluster_df.loc[
dominant_types.get_level_values(0),
f"{cluster_to_count_level_name}_dom_type",
] = dominant_types.get_level_values(1)
# copy dominance fractions to cluster_df
cluster_df.loc[
dominant_fractions.index.get_level_values(0),
f"{cluster_to_count_level_name}_dom_fract",
] = dominant_fractions.values
    if not pd.isnull(index_name):
        cluster_df.index.name = index_name
    return cluster_df
# -*- coding:Utf-8 -*-
"""
This module handles CORMORAN measurement data
CorSer Class
============
.. autoclass:: CorSer
:members:
Notes
-----
Useful members
distdf : distance between radio nodes (122 columns)
devdf : device data frame
"""
#import mayavi.mlab as mlabc
import os
import pdb
import sys
import pandas as pd
import numpy as np
import numpy.ma as ma
import scipy.io as io
from pylayers.util.project import *
from pylayers.util.pyutil import *
from pylayers.mobility.ban.body import *
from pylayers.gis.layout import *
import pylayers.antprop.antenna as antenna
from matplotlib.widgets import Slider, CheckButtons, Button, Cursor
from pylayers.signal.DF import *
# from moviepy.editor import *
from skimage import img_as_ubyte
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
# ConfigParser is used in _loadinfranodes; fall back to the Python 3 module name
try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser
# pyu.getshort is used in _loadbody; make the alias explicit
import pylayers.util.pyutil as pyu
try:
from tvtk.api import tvtk
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
#Those lines handle incompatibility between mayavi and VTK
#and redirect noisy warning message into a log file
# import vtk
# output=vtk.vtkFileOutputWindow()
# output.SetFileName("mayaviwarninglog.tmp")
# vtk.vtkOutputWindow().SetInstance(output)
def cor_log(short=True):
""" display cormoran measurement campaign logfile
Parameters
----------
short : boolean
enable short version
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> cor_log(short=True)
"""
filelog = os.path.join(os.environ['CORMORAN'],'RAW','Doc','MeasurementLog.csv')
log = pd.read_csv(filelog)
if short :
log['day'] = [x.split('/')[0] for x in log['Date'].values]
log['serie']=log['Meas Serie']
return log[['serie','day','Subject','techno','Short Notes']]
else:
return log
def time2npa(lt):
""" convert pd.datetime.time to numpy array
Parameters
----------
lt : pd.datetime.time
Returns
-------
ta : numpy array
time in seconds
"""
ta = (lt.microsecond*1e-6+
lt.second+
lt.minute*60+
lt.hour*3600)
return(ta)
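# Small illustrative example (not part of the original module): time2npa converts
# the time component of a pandas timestamp into seconds since midnight.
def _example_time2npa():
    t = pd.Timestamp('2014-06-11 10:30:15.250000').time()
    return time2npa(t)  # -> 37815.25 seconds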
class CorSer(PyLayers):
""" Handle CORMORAN measurement data
Hikob data handling from CORMORAN measurement campaign
11/06/2014
single subject (Bernard and Nicolas)
12/06/2014
several subject (Jihad, Eric , Nicolas)
"""
def __init__(self,serie=6,day=11,source='CITI',layout=False):
"""
Parameters
----------
serie : int
day : int
source : string
Notes
-----
The environment variable CORMORAN is indicating the location of data directory
"""
assert (day in [11,12]),"wrong day"
try:
self.rootdir = os.environ['CORMORAN']
except:
            raise NameError('Please add a CORMORAN environment variable \
pointing to the data')
# infos
self.serie = serie
self.day = day
self.loadlog()
if day == 11:
if serie in [7,8]:
                raise AttributeError('Serie '+str(serie) + ' has no hkb data and will not be loaded')
if day ==12:
if serie in [17,18,19,20]:
raise AttributeError('Serie '+str(serie) + \
' has no hkb data and will not be loaded')
#Measures
if day==11:
self.stcr = [1,2,3,4,10,11,12,32,33,34,35,9,17,18,19,20,25,26]
self.shkb = [5,6,13,14,15,16,21,22,23,24,27,28,29,30,31,32,33,34,35]
self.sbs = [5,6,7,8,13,14,15,16,21,22,23,24,27,28,29,30,31,32,33,34,35]
self.mocap = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35]
self.mocapinterf=[]
if day==12:
self.stcr = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
self.shkb = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.sbs = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.mocap =[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.mocapinterf = [5,6,7,8,13,14,15,16,21,22,23,24,]
self.typ=''
# HIKOB
if serie in self.shkb:
self._loadhkb(serie=serie,day=day,source=source)
# IR-UWB TCR
if serie in self.stcr:
self._loadTCR(serie=serie,day=day)
# BeSpoon
if serie in self.sbs:
self._loadBS(serie=serie,day=day)
# set filename
if self.typ=='FULL':
self._filename = 'Sc' + self.scenario + '_S' + str(self.serie) + '_R' + str(self.run) + '_' + self.typ.capitalize()
else:
self._filename = 'Sc' + self.scenario + '_S' + str(self.serie) + '_R' + str(self.run) + '_' + self.typ
#Layout
if layout:
self.L= Layout('MOCAP-small2.lay')
# Load Infrastructure Nodes
self._loadinfranodes()
# Load cameras
self._loadcam()
#BODY & interferers
self.subject = str(self.log['Subject'].values[0].replace('jihad','Jihad')).split(' ')
#filter typos in self.subject
self.subject = [ x for x in self.subject if len(x)!=0 ]
if 'Jihad' in self.subject :
uj = self.subject.index('Jihad')
self.subject[uj]='Jihan'
if serie in self.mocap :
# load bodies from mocap file
self._loadbody(serie=serie,day=day)
self._distancematrix()
self._computedevpdf()
if isinstance(self.B,dict):
for b in self.B:
if hasattr(self,'L'):
self.B[b].traj.Lfilename=copy.copy(self.L._filename)
else:
self.B[b].traj.Lfilename='notloaded'
else :
self.B.traj.Lfilename=copy.copy(self.L._filename)
# reference time is tmocap
self.tmocap = self.B[self.subject[0]].time
# load offset dict
self.offset= self._load_offset_dict()
########################
#realign Radio on mocap
########################
# 1 - Resample radio time => mocap time
# 2 - (if available) apply offset
if ('BS' in self.typ) or ('FULL' in self.typ):
print( '\nBS data frame index: ',)
self._align_on_devdf(typ='BS')
print( 'Align on mocap OK...',)
try:
self._apply_offset('BS')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('No BS offset not yet set => use self.offset_setter ')
if ('TCR' in self.typ) or ('FULL' in self.typ):
print ('\nTCR data frame index:', )
self._align_on_devdf(typ='TCR')
print ('Align on mocap OK...',)
try:
self._apply_offset('TCR')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('No TCR offset not yet set => use self.offset_setter')
if ('HK' in self.typ) or ('FULL' in self.typ):
print ('\nHKB data frame index:',)
self._align_on_devdf(typ='HKB')
print ('Align on mocap OK...',)
try:
# self._apply_offset('HKB')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('No HKB offset not yet set => use self.offset_setter')
print ('\nCreate distance Dataframe...',)
self._computedistdf()
print ('OK',)
def __repr__(self):
st = ''
st = st + 'filename : ' + self._filename + '\n'
st = st + 'filewear : ' + self.filewear + '\n'
st = st + 'filebody : ' + self.filebody + '\n'
st = st + 'filemocap : ' + self.filemocap + '\n'
st = st + 'Day : '+ str(self.day)+'/06/2014'+'\n'
st = st + 'Serie : '+ str(self.serie)+'\n'
st = st + 'Scenario : '+str(self.scenario)+'\n'
st = st + 'Run : '+ str(self.run)+'\n'
st = st + 'Type : '+ str(self.typ)+'\n'
st = st + 'Original Video Id : '+ str(self.video)+'\n'
st = st + 'Subject(s) : '
for k in self.subject:
st = st + k + ' '
st = st + '\n\n'
st = st+'Body available: ' + str('B' in dir(self)) + '\n\n'
try :
st = st+'BeSPoon : '+self._fileBS+'\n'
except:
pass
try :
st = st+'HIKOB : '+self._filehkb+'\n'
except:
pass
try :
st = st+'TCR : '+self._fileTCR+'\n'
except:
pass
st = st + '----------------------\n\n'
for k in self.log.columns:
st = st + k + ' :' + str(self.log[k].values)+'\n'
return(st)
# @property
# def dev(self):
# """ display device techno, id , id on body, body owner,...
# """
# title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
# print title + '\n' + '-'*len(title)
# if ('HK' in self.typ) or ('FULL' in self.typ):
# hkbkeys = self.idHKB.keys()
# hkbkeys.sort()
# for d in hkbkeys:
# dev = self.devmapper(self.idHKB[d],'HKB')
# print '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3])
# if ('TCR' in self.typ) or ('FULL' in self.typ):
# tcrkeys = self.idTCR.keys()
# tcrkeys.sort()
# for d in tcrkeys:
# dev = self.devmapper(self.idTCR[d],'TCR')
# print '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3])
@property
def dev(self):
""" display device techno, id , id on body, body owner,...
"""
title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
print( title + '\n' + '='*len(title))
# access points HKB
for d in self.din:
if ('HK' in d) :
dev = self.devmapper(d,'HKB')
print('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.din:
if ('BS' in d) :
dev = self.devmapper(d,'BS')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
# access points TCR
for d in self.din:
if ('TCR' in d) :
dev = self.devmapper(d,'TCR')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
#device per RAT per body
for b in self.B:
if b not in self.interf:
#HKB per body
for d in self.B[b].dev.keys():
if ('HK' in d):
dev = self.devmapper(d,'HKB')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
#bespoon
if ('FULL' in self.typ) or ('HKB' in self.typ):
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('BS' in d):
dev = self.devmapper(d,'BS')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
# print '{0:66}'.format('-'*len(title) )
#TCR per body
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('TCR' in d):
dev = self.devmapper(d,'TCR')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
@property
def ant(self):
""" display device techno, id , id on body, body owner,...
"""
title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
print (title + '\n' + '='*len(title) )
# access points HKB
for d in self.din:
if ('HK' in d) :
dev = self.devmapper(d,'HKB')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.din:
if ('BS' in d) :
dev = self.devmapper(d,'BS')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
# access points TCR
for d in self.din:
if ('TCR' in d) :
dev = self.devmapper(d,'TCR')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
#device per RAT per body
for b in self.B:
if b not in self.interf:
#HKB per body
for d in self.B[b].dev.keys():
if ('HK' in d):
dev = self.devmapper(d,'HKB')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
#bespoon
if ('FULL' in self.typ) or ('HKB' in self.typ):
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('BS' in d):
dev = self.devmapper(d,'BS')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
# print '{0:66}'.format('-'*len(title) )
#TCR per body
if 'FULL' in self.typ:
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('TCR' in d):
dev = self.devmapper(d,'TCR')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print( '{0:66}'.format('-'*len(title) ))
def _loadcam(self):
""" load camera position
Returns
-------
update self.cam
"""
self.cam = np.array([
[-6502.16643961174,5440.97951452912,2296.44437108561],
[-7782.34866625776,4998.47624994092,2417.5861326688],
[8308.82897665828,3618.50516290547,2698.07710953287],
[5606.68337709102,-6354.17891528277,2500.27779697402],
[-8237.91886515041,-2332.98639475305,4765.31798299242],
[5496.0942989988,6216.91946236788,2433.30012872688],
[-8296.19706598514,2430.07325486109,4794.01607841197],
[7718.37527064615,-4644.26760522485,2584.75330667172],
[8471.27154730777,-3043.74550832061,2683.45089703377],
[-8213.04824602894,-4034.57371591121,2368.54548665579],
[-7184.66711497403,-4950.49444503781,2317.68563412347],
[7531.66103727189,5279.02353243886,2479.36291603544],
[-6303.08628709464,-7057.06193926342,2288.84938553817],
[-5441.17834354692,6637.93014323586,2315.15657646861],
[8287.79937470615,59.1614281340528,4809.14535447027]
])*1e-3
def _loadinfranodes(self):
""" load infrastructure nodes
nico
A4
mpts[6,7,8]
X
A3 A1
mpts[9,10,11] mpts[3,4,5]
X X
A2
mpts[0,1,2]
X
TCR = mpts[0,3,6,9]
HKB = mpts[1,2,
4,5,
7,8,
10,11]
bernard
A3
mpts[3,4,5]
X
A2 A4
mpts[6,7,8] mpts[0,1,2]
X X
A1
mpts[9,10,11]
X
TCR = mpts[0,3,6,9]
HKB = mpts[1,2,
4,5,
7,8,
10,11]
"""
filename = os.path.join(self.rootdir,'RAW','11-06-2014','MOCAP','scene.c3d')
print( "\nload infrastructure node position:",)
a, self.infraname, pts, i = c3d.ReadC3d(filename)
pts = pts/1000.
mpts = np.mean(pts, axis=0)
self.din={}
if ('HK' in self.typ) or ('FULL' in self.typ):
uhkb = np.array([[1,2], [4,5], [7,8], [10,11]])
mphkb = np.mean(mpts[uhkb], axis=1)
self.din.update(
{'HKB:1':{'p' : mphkb[3],
# 'T' : np.eye(3),
's3off' : 0.},
'HKB:2':{'p' : mphkb[2],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':0.} ,
'HKB:3':{'p':mphkb[1],
# 'T':array([[-0.59846007, -0.80115264, 0.],
# [ 0.80115264, -0.59846007, 0.],
# [ 0.,0., 1.]]),
's3off':0.},
'HKB:4':{'p':mphkb[0],
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.}
})
# TCR:31 is the coordinator which was not captured.
# The position has been determined via optimization
if ('TCR' in self.typ) or ('FULL' in self.typ):
self.din.update({'TCR:32':{'p':mpts[9],
'T':np.eye(3),
's3off':0.1},
'TCR:24':{'p':mpts[6],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':0.1},
'TCR:27':{'p':mpts[3],
# 'T':array([[-0.59846007, -0.80115264, 0.],
# [ 0.80115264, -0.59846007, 0.],
# [ 0.,0., 1.]]),
's3off':0.1},
'TCR:28':{'p':mpts[0],
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.1},
                             'TCR:31':{'p':np.array([1.7719,-3.2655,1.74]),
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.0}
})
if self.day == 12:
#BS idem HKB:1 and HKB:2
if ('BS' in self.typ) or ('FULL' in self.typ):
self.din.update(
{'BS:74':{'p':mphkb[3],
# 'T':np.eye(3),
's3off':-0.2},
'BS:157':{'p':mphkb[2],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':-0.2} ,
})
#load extra information from inifile (antenna, rotation matrix,...)
inifile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','BodyandWear','AccesPoints.ini')
config = ConfigParser.ConfigParser()
config.read(inifile)
for d in self.din:
self.din[d]['antname']=config.get(d,'file')
self.din[d]['ant']=antenna.Antenna(config.get(d,'file'))
self.din[d]['T']=eval(config.get(d,'t'))
self.din[d]['comment']=config.get(d,'comment')
# self.pts= np.empty((12,3))
# self.pts[:,0]= -mpts[:,1]
# self.pts[:,1]= mpts[:,0]
# self.pts[:,2]= mpts[:,2]
# return mpts
# self.dist = np.sqrt(np.sum((mpts[:,np.newaxis,:]-mpts[np.newaxis,:])**2,axis=2))
def loadlog(self):
""" load in self.log the log of the current serie
from MeasurementLog.csv
"""
filelog = os.path.join(self.rootdir,'RAW','Doc','MeasurementLog.csv')
log = pd.read_csv(filelog)
date = str(self.day)+'/06/14'
self.log = log[(log['Meas Serie'] == self.serie) & (log['Date'] == date)]
def _loadbody(self,day=11,serie=''):
""" load body from motion capture file
Parameters
----------
day :
serie :
"""
assert day in [11,12],"wrong day in _loadbody"
self.B={}
color=['LightBlue','YellowGreen','PaleVioletRed','white','white','white','white','white','white','white']
for us,subject in enumerate(self.subject):
print( "\nload ",subject, " body:",)
seriestr = str(self.serie).zfill(3)
if day == 11:
self.filemocap = os.path.join(self.rootdir,'RAW',str(self.day)+'-06-2014','MOCAP','serie_'+seriestr+'.c3d')
elif day == 12:
self.filemocap = os.path.join(self.rootdir,'RAW',str(self.day)+'-06-2014','MOCAP','Nav_serie_'+seriestr+'.c3d')
# body and wear directory
baw = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','BodyandWear')
if subject =='Jihad':
subject ='Jihan'
#
# Load body cylinder description : "Subject.ini"
# Load wearable device description (contains antenna filename) :
#
self.filebody = os.path.join(baw, subject + '.ini')
self.filewear = os.path.join(baw,subject + '_' +str(self.day)+'-06-2014_' + self.typ + '.ini')
if len(self.subject) >1 or self.mocapinterf:
multi_subject=True
else:
multi_subject=False
self.B.update({subject:Body(_filebody=self.filebody,
_filemocap=self.filemocap,unit = 'mm', loop=False,
_filewear=self.filewear,
centered=False,
multi_subject_mocap=multi_subject,
color=color[us])})
if self.serie in self.mocapinterf:
self.interf = ['Anis_Cylindre:',
'Benoit_Cylindre:',
'Bernard_Cylindre:',
'Claude_Cylindre:',
'Meriem_Cylindre:']
intertmp=[]
if self.serie==13:
self.interf.remove('Bernard_Cylindre:')
for ui,i in enumerate(self.interf):
#try:
print( "load ",i, " interfering body:",)
_filemocap = pyu.getshort(self.filemocap)
self.B.update({i:Cylinder(name=i,
_filemocap=_filemocap,
unit = 'mm',
color = color[ui])})
intertmp.append(i)
#except:
# print "Warning ! load ",i, " FAIL !"
self.interf=intertmp
else :
self.interf=[]
# if len(self.subject) == 1:
# self.B = self.B[self.subject]
def _loadTCR(self,day=11,serie='',scenario='20',run=1):
""" load TCR data
Parameters
----------
day :
serie :
scenario :
run :
"""
#
# TNET : (NodeId,MAC)
#
self.TNET={0:31,
1:2,
7:24,
8:25,
9:26,
10:27,
11:28,
12:30,
14:32,
15:33,
16:34,
17:35,
18:36,
19:37,
20:48,
21:49}
if day==11:
self.dTCR ={'Unused':49,
'COORD':31,
'AP1':32,
'AP2':24,
'AP3':27,
'AP4':28,
'HeadRight':34,
'TorsoTopRight':25,
'TorsoTopLeft':30,
'BackCenter':35,
'HipRight':2,
'WristRight':26,
'WristLeft':48,
'KneeLeft':33,
'AnkleRight':36,
'AnkleLeft':37}
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','TCR')
if day==12:
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','TCR')
self.dTCR ={ 'COORD':31,
'AP1':32,
'AP2':24,
'AP3':27,
'AP4':28,
'Jihad:TorsoTopRight':35,
'Jihad:TorsoTopLeft':2,
'Jihad:BackCenter':33,
'Jihad:ShoulderLeft':37,
'Nicolas:TorsoTopRight':34,
'Nicolas:TorsoTopLeft':49,
'Nicolas:BackCenter':48,
'Nicolas:ShoulderLeft':36,
'Eric:TorsoCenter':30,
'Eric:BackCenter':25,
'Eric:ShoulderLeft':26}
#
# TCR : (Name , MAC)
# iTCR : (MAC , Name)
# dTCR : (NodeId, Name)
#
self.idTCR={}
for k in self.dTCR:
self.idTCR[self.dTCR[k]]=k
dTCRni={}
for k in self.TNET.keys():
dTCRni[k]=self.idTCR[self.TNET[k]]
files = os.listdir(dirname)
if serie != '':
try:
                self._fileTCR = [ x for x in files if '_S'+str(serie)+'_' in x ][0]
            except:
                self._fileTCR = [ x for x in files if '_s'+str(serie)+'_' in x ][0]
tt = self._fileTCR.split('_')
self.scenario=tt[0].replace('Sc','')
self.run = tt[2].replace('R','')
self.typ = tt[3].replace('.csv','').upper()
self.video = 'NA'
else:
            filesc = [ x for x in files if 'Sc'+scenario in x ]
            self._fileTCR = [ x for x in filesc if 'R'+str(run) in x ][0]
self.scenario= scenario
self.run = str(run)
filename = os.path.join(dirname,self._fileTCR)
dtTCR = pd.read_csv(filename)
tcr={}
for k in dTCRni:
for l in dTCRni:
if k!=l:
d = dtTCR[((dtTCR['ida']==k) & (dtTCR['idb']==l))]
d.drop_duplicates('time',inplace=True)
del d['lqi']
del d['ida']
del d['idb']
d = d[d['time']!=-1]
d.index = d['time']
del d['time']
if len(d)!=0:
sr = pd.Series(d['dist']/1000,index=d.index)
tcr[dTCRni[k]+'-'+dTCRni[l]]= sr
self.tcr = pd.DataFrame(tcr)
self.tcr = self.tcr.fillna(0)
ts = 75366400./1e9
t = np.array(self.tcr.index)*ts
t = t-t[0]
self.tcr.index = t
self.ttcr=self.tcr.index
def _loadBS(self,day=11,serie='',scenario='20',run=1):
""" load BeSpoon data
Parameters
----------
day : int
serie : string
scenario : string
run : int
"""
if day == 11:
self.dBS = {'WristRight':157,'AnkleRight':74,'HandRight':0}
elif day == 12:
self.dBS = {'AP1':157,'AP2':74,'HandRight':0}
self.idBS={}
for k in self.dBS:
self.idBS[self.dBS[k]]=k
if day==11:
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','BeSpoon')
if day==12:
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','BeSpoon')
files = os.listdir(dirname)
if serie != '':
#self._fileBS = filter(lambda x : 'S'+str(serie) in x ,files)[0]
self._fileBS = [ x for x in files if 'S'+str(serie) in x ][0]
else:
self._fileBS = [ x for x in files if 'R'+str(serie) in x ][0]
            filesc = [ x for x in files if 'Sc'+scenario in x ]
            self._fileBS = [ x for x in filesc if 'R'+str(run) in x ][0]
bespo = pd.read_csv(os.path.join(dirname,self._fileBS),index_col='ts')
gb = bespo.groupby(['Sensor'])
#get device id
devid,idevid = np.unique(bespo['Sensor'],return_index=True)
# get index of each group
dgb={d:gb.get_group(d) for d in devid}
lgb=[]
for i in dgb:
ind = dgb[i].index/1e3
dti = pd.to_datetime(ind,unit='s')
npai = time2npa(dti)
npai = npai - npai[0]
dgb[i].index=pd.Index(npai)
lgb.append(pd.DataFrame(dgb[i]['d'].values,columns=[self.idBS[0]+'-'+self.idBS[i]],index=dgb[i].index))
df = lgb[0].join(lgb[1])
self.bespo = df
#self.s157 = self.bespo[self.bespo['Sensor']==157]
#self.s157.set_index(self.s157['tu'].values/1e9)
#self.s74 = self.bespo[self.bespo['Sensor']==74]
#self.s74.set_index(self.s74['tu'].values/1e9)
#t157 = np.array(self.s157['tu']/(1e9))
#self.t157 = t157-t157[0]
#t74 = np.array(self.s74['tu']/(1e9))
#self.t74 = t74 - t74[0]
def _loadhkb(self,day=11,serie='',scenario='20',run=1,source='CITI'):
""" load hkb measurement data
Parameters
----------
day : string
serie : string
scenario : string
run : int
source : 'string'
Returns
-------
update self.hkb
"""
if day == 11:
if serie == 5:
source = 'UR1'
if day==11:
self.dHKB ={'AP1':1,'AP2':2,'AP3':3,'AP4':4,
'HeadRight':5,'TorsoTopRight':6,'TorsoTopLeft':7,'BackCenter':8,'ElbowRight':9,'ElbowLeft':10,'HipRight':11,'WristRight':12,'WristLeft':13,'KneeLeft':14,'AnkleRight':16,'AnkleLeft':15}
if source=='UR1' :
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','HIKOB')
elif source=='CITI':
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','HIKOB','CITI')
if day==12:
self.dHKB= {'AP1':1,'AP2':2,'AP3':3,'AP4':4,'Jihad:TorsoTopRight':10,'Jihad:TorsoTopLeft':9,'Jihad:BackCenter':11,'JihadShoulderLeft':12,
'Nicolas:TorsoTopRight':6,'Nicolas:TorsoTopLeft':5,'Nicolas:BackCenter':7,'Nicolas:ShoulderLeft':8,
'Eric:TooTopRight':15,'Eric:TorsoTopLeft':13,'Eric:BackCenter':16,'Eric:ShoulderLeft':14}
#if source=='UR1':
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','HIKOB')
files = os.listdir(dirname)
self.idHKB={}
for k in self.dHKB:
self.idHKB[self.dHKB[k]]=k
if serie != '':
self._filehkb = [ x for x in files if 'S'+str(serie) in x][0]
tt = self._filehkb.split('_')
if source == 'UR1':
self.scenario=tt[0].replace('Sc','')
self.run = tt[2].replace('R','')
self.typ = tt[3]
self.video = tt[4].replace('.mat','')
elif source == 'CITI':
self.scenario=tt[0].replace('Sc','')+tt[1]
self.run = tt[3].replace('r','')
self.typ = tt[4]
if self.typ == 'HKB':
self.typ = 'HKBS'
self.video = tt[5].replace('.mat','')
else:
            filesc = [ x for x in files if 'Sc'+scenario in x ]
            if source=='UR1':
                self._filehkb = [ x for x in filesc if 'R'+str(run) in x ][0]
            else:
                self._filehkb = [ x for x in filesc if 'r'+str(run) in x ][0]
data = io.loadmat(os.path.join(dirname,self._filehkb))
if source=='UR1':
self.rssi = data['rssi']
self.thkb = data['t']
else:
self.rssi = data['val']
self.thkb = np.arange(np.shape(self.rssi)[2])*25.832e-3
def topandas():
try:
self.hkb = pd.DataFrame(index=self.thkb[0])
except:
self.hkb = pd.DataFrame(index=self.thkb)
for k in self.idHKB:
for l in self.idHKB:
if k!=l:
col = self.idHKB[k]+'-'+self.idHKB[l]
rcol = self.idHKB[l]+'-'+self.idHKB[k]
if rcol not in self.hkb.columns:
rssi = self.rssi[k-1,l-1,:]
self.hkb[col] = rssi
topandas()
self.hkb = self.hkb[self.hkb!=0]
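        # Illustrative note (added comment): once _loadhkb has run, the RSSI time
        # series of a link can be read directly from the dataframe, e.g. (example
        # link name)
        #   C = CorSer(serie=6, day=11)
        #   rssi = C.hkb['AP1-TorsoTopRight']   # pandas Series indexed by time (s)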
def compute_visibility(self,techno='HKB',square_mda=True,all_links=True):
""" determine visibility of links for a given techno
Parameters
----------
techno string
select the given radio technology of the nodes to determine
the visibility matrix
square_mda boolean
            select output format
True : (device x device x timestamp)
False : (link x timestamp)
all_links : bool
compute all links or just those for which data is available
Return
------
if square_mda = True
intersection : (ndevice x nbdevice x nb_timestamp)
            matrix of intersection (1 if link is cut, 0 otherwise)
links : (nbdevice)
name of the links
if square_mda = False
intersection : (nblink x nb_timestamp)
            matrix of intersection (1 if link is cut, 0 otherwise)
links : (nblink x2)
name of the links
Example
-------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=14,day=12)
>>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
>>> inter.shape
(15, 15, 12473)
        >>> C.imshowvisibility_i(techno='TCR',t=10)
"""
if techno == 'TCR':
if not ((self.typ == 'TCR') or (self.typ == 'FULL')):
raise AttributeError('Serie has not data for techno: ',techno)
hname = self.tcr.keys()
dnode=copy.copy(self.dTCR)
dnode.pop('COORD')
prefix = 'TCR:'
elif techno=='HKB':
if not ((self.typ == 'HKBS') or (self.typ == 'FULL')):
                raise AttributeError('Serie has no data for techno: '+techno)
hname = self.hkb.keys()
dnode=self.dHKB
prefix = 'HKB:'
# get link list
if all_links:
import itertools
links =[l for l in itertools.combinations(dnode.keys(),2)]
else:
links=[n.split('-') for n in hname]
links = [l for l in links if ('COORD' not in l[0]) and ('COORD' not in l[1])]
#mapping between device name in self.hkb and on body/in self.devdf
dev_bid = [self.devmapper(k,techno=techno)[2] for k in dnode.keys()]
nb_totaldev=len(np.unique(self.devdf['id']))
# extract all dev position on body
# Mpdev : (3 x (nb devices and nb infra nodes) x nb_timestamp)
        Mpdev = np.empty((3,len(dev_bid),len(self.devdf.index)//nb_totaldev))
# get all positions
for ik,i in enumerate(dev_bid) :
if i in self.din:
Mpdev[:,ik,:] = self.din[i]['p'][:,np.newaxis]
else:
pts = self.devdf[self.devdf['id']==i][['x','y','z']].values.T
if np.prod(pts.shape)!=0:
Mpdev[:,ik,:] = pts
# create A and B from links
nA = np.array([prefix+ str(dnode[l[0]]) for l in links])
nB = np.array([prefix+ str(dnode[l[1]]) for l in links])
dma = dict(zip(dev_bid,range(len(dev_bid))))
mnA = [dma[n] for n in nA]
mnB = [dma[n] for n in nB]
A=Mpdev[:,mnA]
B=Mpdev[:,mnB]
# intersect2D matrix is
# d_0: nb links
#d_1: (cylinder number) * nb body + 1 * nb cylinder_object
# d_2 : nb frame
intersect2D = np.zeros((len(links),
11*len(self.subject) + len(self.interf),
Mpdev.shape[-1]))
# usub : index axes subject
usub_start=0
usub_stop=0
# C-D correspond to bodies segments
#C or D : 3 x 11 body segments x time
# radius of cylinders are (nb_cylinder x time)
for b in self.B:
print( 'processing shadowing from ',b)
# if b is a body not a cylinder
if not 'Cylindre' in b:
uta = self.B[b].sl[:,0].astype('int')
uhe = self.B[b].sl[:,1].astype('int')
rad = self.B[b].sl[:,2]
C = self.B[b].d[:,uta,:]
D = self.B[b].d[:,uhe,:]
try:
radius = np.concatenate((radius,rad[:,np.newaxis]*np.ones((1,C.shape[2]))),axis=0)
except:
radius = rad[:,np.newaxis]*np.ones((1,C.shape[2]))
usub_start=usub_stop
usub_stop=usub_stop+11
else:
cyl = self.B[b]
# top of cylinder
top = cyl.d[:,cyl.topnode,:]
# bottom of cylinder =top with z =0
bottom = copy.copy(cyl.d[:,cyl.topnode,:])
bottom[2,:]=0.02
#top 3 x 1 X time
C=top[:,np.newaxis,:]
D=bottom[:,np.newaxis,:]
radius = np.concatenate((radius,cyl.radius[np.newaxis]))
usub_start=usub_stop
usub_stop=usub_stop+1
f,g,X,Y,alpha,beta,dmin=seg.segdist(A,B,C,D,hard=True)
intersect2D[:,usub_start:usub_stop,:]=g
# import ipdb
# ipdb.set_trace()
#USEFUL Lines for debug
#########################
# def plt3d(ndev=53,ncyl=0,kl=11499):
# fig=plt.figure()
# ax=fig.add_subplot(111,projection='3d')
# if not isinstance(kl,list):
# kl=[kl]
# for ktime in kl:
# ax.plot([A[0,ndev,ktime],B[0,ndev,ktime]],[A[1,ndev,ktime],B[1,ndev,ktime]],[A[2,ndev,ktime],B[2,ndev,ktime]])
# [ax.plot([C[0,k,ktime],D[0,k,ktime]],[C[1,k,ktime],D[1,k,ktime]],[C[2,k,ktime],D[2,k,ktime]],'k') for k in range(11) ]
# ax.plot([X[0,ndev,ncyl,ktime],Y[0,ndev,ncyl,ktime]],[X[1,ndev,ncyl,ktime],Y[1,ndev,ncyl,ktime]],[X[2,ndev,ncyl,ktime],Y[2,ndev,ncyl,ktime]])
# ax.auto_scale_xyz([-5, 5], [-5, 5], [0, 2])
# plt.show()
# import ipdb
# ipdb.set_trace()
uinter1 = np.where((intersect2D<=(radius-0.01)))
uinter0 = np.where((intersect2D>(radius-0.01)))
# intersect2D_=copy.copy(intersect2D)
intersect2D[uinter1[0],uinter1[1],uinter1[2]]=1
intersect2D[uinter0[0],uinter0[1],uinter0[2]]=0
# #integrate the effect of all bodies by summing on axis 1
intersect = np.sum(intersect2D,axis=1)>0
if square_mda:
dev= np.unique(links)
ddev = dict(zip(dev,range(len(dev))))
            lmap = np.array([ (ddev[x[0]],ddev[x[1]]) for x in links ])
M = np.nan*np.ones((len(dev),len(dev),intersect.shape[-1]))
for i in range(len(intersect)):
id1 = lmap[i][0]
id2 = lmap[i][1]
M[id1,id2,:]=intersect[i,:]
M[id2,id1,:]=intersect[i,:]
intersect=M
links = dev
self._visilinks = links
self._visiintersect = intersect
return intersect,links
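        # Illustrative post-processing sketch (comment only): with square_mda=True
        # the fraction of time a link is shadowed can be estimated from the output,
        #   inter, links = C.compute_visibility(techno='HKB', square_mda=True)
        #   ia, ib = list(links).index('AP1'), list(links).index('TorsoTopRight')
        #   shadow_ratio = np.nanmean(inter[ia, ib, :])
        # where 'AP1' and 'TorsoTopRight' are example device names.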
def imshowvisibility(self,techno='HKB',t=0,**kwargs):
""" imshow visibility mda
Parameters
----------
techno : (HKB|TCR)
t : float
            time in seconds
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=6,day=12)
>>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
        >>> fig,ax = C.imshowvisibility(techno='TCR',t=10)
See Also
--------
pylayers.measures.CorSer.compute_visibility()
"""
defaults = { 'grid':True,
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if 'fig' not in kwargs:
fig = plt.figure()
else:
fig = kwargs.pop('fig')
if 'ax' not in kwargs:
ax = fig.add_subplot(111)
else:
ax = kwargs.pop('ax')
if not '_visiintersect' in dir(self):
print( 'Visibility computed only once')
self.compute_visibility(techno=techno)
links = self._visilinks
inter = self._visiintersect
kt=np.where(self.tmocap <= t)[0][-1]
plt.xticks(np.arange(0, len(links), 1.0))
plt.yticks(np.arange(0, len(links), 1.0))
ax.set_xlim([-0.5,len(links)-0.5])
ax.set_ylim([len(links)-0.5,-0.5])
ax.xaxis.set_ticks_position('top')
xtickNames = plt.setp(ax, xticklabels=links)
ytickNames = plt.setp(ax, yticklabels=links)
plt.setp(xtickNames, rotation=90, fontsize=8)
plt.setp(ytickNames, rotation=0, fontsize=8)
ims=[]
ax.imshow(inter[:,:,kt],interpolation='nearest')
if kwargs['grid']:
ax.grid()
return fig,ax
def _show3i(self,t=0,**kwargs):
""" show3 interactive
"""
fig =plt.figure(num='Jog',figsize=(5,1.5))
#set time to -10 is a trick to make appear interferers cylinder
#because __refreshshow3i only update the data of the cylinder.
# if cylinder is not present in the first _show3, they are not displayed
# later.
time=self.B[self.subject[0]].time
fId = np.where(time<= t)[0][-1]
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.5, 0.8, 0.3])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, len(time),
valinit=fId, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
            self.__refreshshow3i(value)
            slax.set_title('t='+str(time[value]),loc='left')
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
#QUIT by pressing 'q'
def press(event):
if event.key == 'q':
mlab.close(mayafig)
plt.close(fig)
fig.canvas.mpl_connect('key_press_event', press)
#-1 frame axes
axm = plt.axes([0.2, 0.05, 0.1, 0.15])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.15])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.15])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.8, 0.05, 0.1, 0.15])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
plt.show()
def _show3idemo(self,t=0,**kwargs):
""" show3 interactive
"""
defaults={'nodename':'TorsoTopLeft'}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fig =plt.figure(num='Jog',figsize=(5,1.5))
#set time to -10 is a trick to make appear interferers cylinder
#because __refreshshow3i only update the data of the cylinder.
# if cylinder is not present in the first _show3, they are not displayed
# later.
time=self.B[self.subject[0]].time
fId = np.where(time<= t)[0][-1]
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.5, 0.8, 0.3])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, len(time),
valinit=fId, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
            self.__refreshshow3i(value)
            slax.set_title('t='+str(time[value]),loc='left')
vline0.set_data(([time[value],time[value]],[0,1]))
vline1.set_data(([time[value],time[value]],[0,1]))
vline2.set_data(([time[value],time[value]],[0,1]))
vline3.set_data(([time[value],time[value]],[0,1]))
fig.canvas.draw_idle()
fig2.canvas.draw_idle()
sliderx.on_changed(update_x)
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
#QUIT by pressing 'q'
def press(event):
if event.key == 'q':
mlab.close(mayafig)
plt.close(fig)
plt.close(fig2)
fig.canvas.mpl_connect('key_press_event', press)
#-1 frame axes
axm = plt.axes([0.2, 0.05, 0.1, 0.15])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.15])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.15])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.8, 0.05, 0.1, 0.15])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
fig2,ax2 = plt.subplots(4,1,figsize=(12,6))
ax2=ax2.ravel()
df0 = self.getlink(kwargs['nodename'],'AP1',techno='HKB')
df0.plot(ax=ax2[0],fig=fig2)
df1 = self.getlink(kwargs['nodename'],'AP2',techno='HKB')
df1.plot(ax=ax2[1],fig=fig2)
df2 = self.getlink(kwargs['nodename'],'AP3',techno='HKB')
df2.plot(ax=ax2[2],fig=fig2)
df3 = self.getlink(kwargs['nodename'],'AP4',techno='HKB')
df3.plot(ax=ax2[3],fig=fig2)
ax2[0].set_ylabel('AP1')
ax2[1].set_ylabel('AP2')
ax2[2].set_ylabel('AP3')
ax2[3].set_ylabel('AP4')
vline0 = ax2[0].axvline(x=time[fId], color='red')
vline1 = ax2[1].axvline(x=time[fId], color='red')
vline2 = ax2[2].axvline(x=time[fId], color='red')
vline3 = ax2[3].axvline(x=time[fId], color='red')
fig2.suptitle(kwargs['nodename'])
plt.show()
def __refreshshow3i(self,kt):
""" show3 update for interactive mode
USED in imshowvisibility_i
"""
t=self.tmocap[kt]
for ib,b in enumerate(self.B):
self.B[b].settopos(t=t,cs=True)
try:
# body
X=np.hstack((self.B[b]._pta,self.B[b]._phe))
self.B[b]._mayapts.mlab_source.set(x=X[0,:], y=X[1,:], z=X[2,:])
# device
udev = [self.B[b].dev[i]['uc3d'][0] for i in self.B[b].dev]
Xd=self.B[b]._f[kt,udev,:].T
self.B[b]._mayadev.mlab_source.set(x=Xd[0,:], y=Xd[1,:], z=Xd[2,:])
# name
uupper = np.where(X[2]==X[2].max())[0]
self.B[b]._mayaname.actors.pop()
self.B[b]._mayaname = mlab.text3d(X[0,uupper][0],X[1,uupper][0],X[2,uupper][0],self.B[b].name,scale=0.05,color=(1,0,0))
# s = np.hstack((cylrad,cylrad))
except:
# cylinder
X=np.vstack((self.B[b].top,self.B[b].bottom))
self.B[b]._mayapts.mlab_source.set(x=X[:,0], y=X[:,1], z=X[:,2])
# name
self.B[b]._mayaname.actors.pop()
self.B[b]._mayaname = mlab.text3d(self.B[b].top[0],self.B[b].top[1],self.B[b].top[2],self.B[b].name,scale=0.05,color=(1,0,0))
#vdict
V = self.B[b].traj[['vx','vy','vz']].iloc[self.B[b].toposFrameId].values
self.B[b]._mayavdic.mlab_source.set(x= self.B[b].top[0],y=self.B[b].top[1],z=self.B[b].top[2],u=V[ 0],v=V[ 1],w=V[ 2])
def imshowvisibility_i(self,techno='HKB',t=0,**kwargs):
""" imshow visibility mda interactive
Parameters
----------
inter : (nb link x nb link x timestamps)
links : (nblinks)
        time : initial time (s)
Example
-------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=6,day=12)
        >>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
        >>> C.imshowvisibility_i(techno='TCR',t=10)
"""
# if in_ipynb():
# notebook = False #program launch in ipyhon notebook
# from IPython.html import widgets # Widget definitions
# from IPython.display import display, clear_output# Used to display widgets in the notebook
# else :
# notebook = False
if not '_visiintersect' in dir(self):
print( 'Visibility is computed only once, Please wait\n')
self.compute_visibility(techno=techno)
links = self._visilinks
inter = self._visiintersect
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)
time=self.tmocap
fId = np.where(time<=t)[0][-1]
vertc = [(0,-10),(0,-10),(0,10),(0,-10)]
poly = plt.Polygon(vertc)
pp = ax.add_patch(poly)
plt.xticks(np.arange(0, len(links), 1.0))
plt.yticks(np.arange(0, len(links), 1.0))
ax.set_xlim([-0.5,len(links)-0.5])
ax.set_ylim([len(links)-0.5,-0.5])
ax.xaxis.set_ticks_position('top')
xtickNames = plt.setp(ax, xticklabels=links)
ytickNames = plt.setp(ax, yticklabels=links)
plt.setp(xtickNames, rotation=90, fontsize=8)
plt.setp(ytickNames, rotation=0, fontsize=8)
ims=[]
l=ax.imshow(inter[:,:,fId],interpolation='nearest')
#set time to -10 is a trick to make appear interferers cylinder
#because __refreshshow3i only update the data of the cylinder.
# if cylinder is not present in the first _show3, they are not displayed
# later.
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.15, 0.8, 0.05])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, inter.shape[-1],
valinit=fId, color='#AAAAAA')
# else :
# int_range = widgets.IntSliderWidget(min=0,max=inter.shape[-1],step=1,value=fId)
# display(int_range)
def update_x(val):
value = int(sliderx.val)
sliderx.valtext.set_text('{}'.format(value))
l.set_data(inter[:,:,value])
            self.__refreshshow3i(value)
            slax.set_title('t='+str(time[value]),loc='left')
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
# else:
# def update_x(name,value):
# clear_output(wait=True)
# display(plt.gcf())
# plt.imshow(inter[:,:,value],interpolation='nearest')
# # l.set_data(inter[:,:,value])
# kwargs['bodytime']=[self.tmocap[value]]
# self._show3(**kwargs)
# myu.inotshow('fig1',width=200,height=200,magnification=1)
# # slax.set_title('t='+str(time[val]),loc='left')
# # fig.canvas.draw_idle()
# int_range.on_trait_change(update_x, 'value')
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
# #QUIT by pressing 'q'
# def press(event):
# if event.key == 'q':
# mlab.close(mayafig)
# plt.close(fig)
# fig.canvas.mpl_connect('key_press_event', press)
# if not notebook:
#-1 frame axes
axm = plt.axes([0.3, 0.05, 0.1, 0.075])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.075])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.075])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.9, 0.05, 0.1, 0.075])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
plt.show()
def _distancematrix(self):
"""Compute the distance matrix between the nodes
self.dist : (nb frame x nb_node x nb_node)
self.dist_nodesmap : list of used nodes (useful to make the association ;) )
"""
if not isinstance(self.B,dict):
B={self.subject[0]:self.B}
else :
B=self.B
bn= []
for b in B:
if 'dev' in dir(B[b]):
tdev=[]
for k in B[b].dev:
bn.append(k)
tdev.append(B[b].dev[k]['uc3d'][0])
tdev=np.array(tdev)
try:
pnb = np.concatenate((pnb,B[b]._f[:,tdev,:]),axis=1)
except:
pnb = B[b]._f[:,tdev,:]
ln = []
uin = []
# infrastructure nodes
if ('HK' in self.typ) or ('FULL' in self.typ):
uin.extend(['HKB:1','HKB:2','HKB:3','HKB:4'])
if ('TCR' in self.typ) or ('FULL' in self.typ):
# TCR:31 is the coordinator (1.7719,-3.26)
uin.extend(['TCR:32','TCR:24','TCR:27','TCR:28','TCR:31'])
if self.day == 12:
if ('BS' in self.typ) or ('FULL' in self.typ):
uin.extend(['BS:74','BS:157'])
ln = uin + bn
pin = np.array([self.din[d]['p'] for d in uin])
pin2 = np.empty((pnb.shape[0],pin.shape[0],pin.shape[1]))
pin2[:,:,:] = pin
p = np.concatenate((pin2,pnb),axis=1)
self.points = p
self.dist = np.sqrt(np.sum((p[:,:,np.newaxis,:]-p[:,np.newaxis,:,:])**2,axis=3))
self.dist_nodesmap = ln
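        # Illustrative sketch (comment only): self.dist can be queried through
        # self.dist_nodesmap, e.g. the ground-truth distance between two mapped nodes
        #   ia = self.dist_nodesmap.index('HKB:1')
        #   ib = self.dist_nodesmap.index('HKB:2')
        #   d_ab = self.dist[:, ia, ib]     # (nb frames,) distances in meters
        # the node identifiers above are examples; see self.dist_nodesmap for the list.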
def _computedistdf(self):
"""Compute the distance dataframe from distance matrix
"""
# HIKOB
if ('HK' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'hkb')[0]:self.devmapper(k,'hkb')[2] for k in self.dHKB}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),self.dist_nodesmap.index(devmap[k.split('-')[1]])] for k in self.hkb.keys()])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
df = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
# BE Spoon
if ('BS' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'BS')[0]:self.devmapper(k,'BS')[2] for k in self.dBS}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),self.dist_nodesmap.index(devmap[k.split('-')[1]])] for k in self.bespo.keys()])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
dfb = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
df = df.join(dfb)
del dfb
if ('TCR' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'tcr')[0]:self.devmapper(k,'tcr')[2] for k in self.dTCR}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),
self.dist_nodesmap.index(devmap[k.split('-')[1]])]
for k in self.tcr.keys() ])
# for k in self.tcr.keys() if not 'COORD' in k])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
dft = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
if ('FULL' in self.typ):
df = df.join(dft)
else :
df = dft
del dft
self.distdf=df
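# Illustrative sketch (made-up node names and times): how a per-link DataFrame
# is built from a (nframe x nnode x nnode) distance tensor with fancy indexing.
#   import numpy as np
#   import pandas as pd
#   nodes = ['HKB:1', 'HKB:2', 'AnkleLeft:4']
#   dist = np.random.rand(10, 3, 3)
#   t = np.arange(10) / 100.
#   pairs = np.array([[0, 1], [0, 2], [1, 2]])
#   cols = [nodes[i] + '-' + nodes[j] for i, j in pairs]
#   df = pd.DataFrame(dist[:, pairs[:, 0], pairs[:, 1]], columns=cols, index=t)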
# def accessdm(self,a,b,techno=''):
# """ access to the distance matrix
# give name|id of node a and b and a given techno. retrun Groung truth
# distance between the 2 nodes
# # """
# # a,ia,bia,subja=self.devmapper(a,techno)
# # b,ib,bib,subjb=self.devmapper(b,techno)
# if 'HKB' in techno :
# if isinstance(a,str):
# ia = self.dHKB[a]
# else:
# ia = a
# a = self.idHKB[a]
# if isinstance(b,str):
# ib = self.dHKB[b]
# else:
# ib = b
# b = self.idHKB[b]
# elif 'TCR' in techno :
# if isinstance(a,str):
# ia = self.dTCR[a]
# else:
# ia = a
# a = self.idTCR[a]
# if isinstance(b,str):
# ib = self.dTCR[b]
# else:
# ib = b
# b = self.idTCR[b]
# else :
# raise AttributeError('please give only 1 techno or radio node')
# ka = techno+':'+str(ia)
# kb = techno+':'+str(ib)
# ua = self.dist_nodesmap.index(ka)
# ub = self.dist_nodesmap.index(kb)
# return(ua,ub)
# c3ds = self.B._f.shape
# if 'Full' in self.typ:
# pdev= np.empty((c3ds[0],len(self.dHKB)+len(self.tcr)+len(bs),3))
# elif 'HK' in self.typ:
# pdev= np.empty((c3ds[0],len(self.dHKB)+len(bs),3))
# elif 'TCR' in self.typ:
# pdev= np.empty((c3ds[0],len(self.tcr),3))
# else:
# raise AttributeError('invalid self.typ')
# self.B.network()
# DB = self.B.D2
# ludev = np.array([[i,self.B.dev[i]['uc3d'][0]] for i in self.B.dev])
# for i in ludev:
# pdev[:,eval(i[0])-1,:] = self.B._f[:,i[1],:]
# # self.dist = np.sqrt(np.sum((mpts[:,np.newaxis,:]-mpts[np.newaxis,:])**2,axis=2))
def vlc(self):
""" play video of the associated serie
"""
videofile = os.path.join(self.rootdir,'POST-TREATED', str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
try:
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
os.system('vlc '+filename +'&' )
except:
raise AttributeError('file '+ self._filename + ' not found')
def snapshot(self,t0=0,offset=15.5,title=True,save=False,fig=[],ax=[],figsize=(10,10)):
""" single snapshot plot
Parameters
----------
t0: float
offset : float
title : boolean
save : boolean
fig
ax
figsize : tuple
"""
if fig ==[]:
fig=plt.figure(figsize=figsize)
if ax == []:
ax = fig.add_subplot(111)
if 'video_sec' in self.offset[self._filename]:
offset = self.offset[self._filename]['video_sec']
elif offset != '':
offset = offset
else:
offset=0
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(t0+offset)
I0 = img_as_ubyte(F0)
ax.imshow(F0)
if title:
ax.set_title('t = '+str(t0)+'s')
if save :
plt.savefig(self._filename +'_'+str(t0) + '_snap.png',format='png')
return fig,ax
def snapshots(self,t0=0,t1=10,offset=15.5):
""" take snapshots
Parameters
----------
t0 : float
t1 : float
"""
if 'video_sec' in self.offset[self._filename]:
offset = self.offset[self._filename]['video_sec']
elif offset != '':
offset = offset
else:
offset=0
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(t0+offset)
F1 = vc.get_frame(t1+offset)
I0 = img_as_ubyte(F0)
I1 = img_as_ubyte(F1)
plt.subplot(121)
plt.imshow(F0)
plt.title('t = '+str(t0)+'s')
plt.subplot(122)
plt.imshow(F1)
plt.title('t = '+str(t1)+'s')
def _show3(self,**kwargs):
""" mayavi 3d show of scenario
Parameters
----------
L : boolean
display layout (True)
body :boolean
display bodytime(True)
bodyname : boolean
display body name
bodytime: list
list of time instant where body topos has to be shown
devsize : float
device on body size (100)
devlist : list
list of device name to show on body
pattern : boolean
display devices pattern
trajectory : boolean
display trajectory (True)
tagtraj : boolean
tag on trajectory at the 'bodytime' instants (True)
tagname : list
name of the tagtrajs
tagpoffset : ndarray
offset of the tag positions (nb_of_tags x 3)
fontsizetag : float
size of the tag names
inodes : boolean
display infrastructure nodes
inname : boolean
display infrastructure node name
innamesize : float,
size of name of infrastructure nodes (0.1)
incolor: str
color of infrastructure nodes ('r')
insize
size of infrastructure nodes (0.1)
camera : boolean
display Vicon camera position (True)
cameracolor : str
color of camera nodes ('b')
camerasize : float
size of camera nodes (0.1)
Examples
--------
>>> S = Corser(6)
>>> S._show3()
"""
defaults = { 'L':True,
'body':True,
'bodyname':True,
'subject':[],
'interf':True,
'trajectory' :False,
'trajectory_list' :[],
'devsize':100,
'devlist':[],
'pattern':False,
'inodes' : True,
'inname' : True,
'innamesize' : 0.1,
'incolor' : 'r',
'insize' : 0.1,
'camera':True,
'cameracolor' :'k',
'camerasize' :0.1,
'bodytime':[],
'tagtraj':True,
'tagname':[],
'tagpoffset':[],
'fontsizetag':0.1,
'trajectory_color_range':True,
'trajectory_linewidth':0.01
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
cold = pyu.coldict()
camhex = cold[kwargs['cameracolor']]
cam_color = tuple(pyu.rgb(camhex)/255.)
inhex = cold[kwargs['incolor']]
in_color = tuple(pyu.rgb(inhex)/255.)
if kwargs['subject'] == []:
subject = self.subject
else:
subject = kwargs['subject']
if kwargs['L']:
self.L._show3(opacity=0.5)
v = self.din.items()
if kwargs['inodes']:
X= np.array([v[i][1]['p'] for i in range(len(v))])
mlab.points3d(X[:,0],X[:,1], X[:,2],scale_factor=kwargs['insize'],color=in_color)
if kwargs['pattern']:
for i in range(len(v)):
if not hasattr(self.din[v[i][0]]['ant'],'SqG'):
self.din[v[i][0]]['ant'].eval()
self.din[v[i][0]]['ant']._show3(po=v[i][1]['p'],
T=self.din[v[i][0]]['T'],
ilog=False,
minr=0.01,
maxr=0.2,
newfig=False,
title=False,
colorbar=False,
)
if kwargs['inname']:
[mlab.text3d(v[i][1]['p'][0],
v[i][1]['p'][1],
v[i][1]['p'][2]+v[i][1]['s3off'],
v[i][0],
scale=kwargs['innamesize'],color=in_color) for i in range(len(v))]
if kwargs['body']:
if kwargs['bodytime']==[]:
time =np.linspace(0,self.B[subject[0]].time[-1],5).astype(int)
# time=range(10,100,20)
else :
time=kwargs['bodytime']
for ki, i in enumerate(time):
for ib,b in enumerate(subject):
self.B[b].settopos(t=i,cs=True)
self.B[b]._show3(dev=True,
name = kwargs['bodyname'],
devlist=kwargs['devlist'],
devsize=kwargs['devsize'],
tube_sides=12,
pattern=kwargs['pattern'])
if kwargs['tagtraj']:
X=self.B[b].traj[['x','y','z']].values[self.B[b].toposFrameId]
if kwargs['tagpoffset']==[]:
X[2]=X[2]+0.2
else :
X=X+kwargs['tagpoffset'][ki]
if kwargs['tagname']==[]:
name = 't='+str(i)+'s'
else :
name = str(kwargs['tagname'][ki])
mlab.text3d(X[0],X[1],X[2],name,scale=kwargs['fontsizetag'])
if kwargs['interf']:
for ib,b in enumerate(self.interf):
self.B[b].settopos(t=i,cs=True)
self.B[b]._show3(name=kwargs['bodyname'],tube_sides=12)
if kwargs['trajectory']:
if kwargs['trajectory_list']==[]:
tr_subject = subject
else:
tr_subject = kwargs['trajectory_list']
for b in tr_subject:
self.B[b].traj._show3(color_range=kwargs['trajectory_color_range'],
linewidth=kwargs['trajectory_linewidth'])
if kwargs['camera'] :
mlab.points3d(self.cam[:,0],self.cam[:,1], self.cam[:,2],scale_factor=kwargs['camerasize'],color=cam_color)
mlab.view(-111.44127634143871,
60.40674368088245,
24.492297713984197,
np.array([-0.07235499, 0.04868631, -0.00314969]))
mlab.view(-128.66519195313163,
50.708933839573511,
24.492297713984247,
np.array([-0.07235499, 0.04868631, -0.00314969]))
def anim(self):
self._show3(body=False,inname=False,trajectory=False)
[self.B[b].anim() for b in self.B]
mlab.view(-43.413544538477254,
74.048193730704611,
11.425837641867618,
np.array([ 0.48298163, 0.67806043, 0.0987967 ]))
def imshow(self,time=100,kind='time'):
""" DEPRECATED
Parameters
----------
kind : string
'mean','std'
"""
fig = plt.figure(figsize=(10,10))
self.D = self.rssi-self.rssi.swapaxes(0,1)
try:
timeindex = np.where(self.thkb[0]-time>0)[0][0]
except:
timeindex = np.where(self.thkb-time>0)[0][0]
if kind=='time':
dt1 = self.rssi[:,:,timeindex]
dt2 = self.D[:,:,timeindex]
if kind == 'mean':
dt1 = ma.masked_invalid(self.rssi).mean(axis=2)
dt2 = ma.masked_invalid(self.D).mean(axis=2)
if kind == 'std':
dt1 = ma.masked_invalid(self.rssi).std(axis=2)
dt2 = ma.masked_invalid(self.D).std(axis=2)
ax1 = fig.add_subplot(121)
#img1 = ax1.imshow(self.rssi[:,:,timeindex],interpolation='nearest',origin='lower')
img1 = ax1.imshow(dt1,interpolation='nearest')
labels = [ self.idHKB[x] for x in range(1,17)]
plt.xticks(range(16),labels,rotation=80,fontsize=14)
plt.yticks(range(16),labels,fontsize=14)
if kind=='time':
plt.title('t = '+str(time)+ ' s')
if kind=='mean':
plt.title(u'$mean(\mathbf{L})$')
if kind=='std':
plt.title(u'$std(\mathbf{L})$')
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
clb1 = fig.colorbar(img1,cax1)
clb1.set_label('level dBm',fontsize=14)
ax2 = fig.add_subplot(122)
#img2 = ax2.imshow(self.D[:,:,timeindex],interpolation='nearest',origin='lower')
img2 = ax2.imshow(dt2,interpolation='nearest')
plt.title(u'$\mathbf{L}-\mathbf{L}^T$')
divider = make_axes_locatable(ax2)
plt.xticks(range(16),labels,rotation=80,fontsize=14)
plt.yticks(range(16),labels,fontsize=14)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
clb2 = fig.colorbar(img2,cax2)
clb2.set_label('level dBm',fontsize=14)
plt.tight_layout()
plt.show()
#for k in range(1,17):
# for l in range(1,17):
# self.dHKB[(k,l)]=iHKB[k]+' - '+iHKB[l]
# cpt = cpt + 1
return fig,(ax1,ax2)
def lk2nd(self,lk):
""" transcode a lk from Id to real name
Parameters
----------
lk : string
Examples
--------
>>> C=Corser(6)
>>> lk = 'HKB:15-HKB:7'
>>> C.lk2nd(lk)
"""
u = lk.replace('HKB:','').split('-')
v = [ self.idHKB[int(x)] for x in u ]
return(v)
def _load_offset_dict(self):
""" load offset_dictionnary.bin
Returns
-------
d : dict
{'Sc20_S5_R1_HKBS': {'hkb_index': -148, 'video_sec': 32.622087273809527},
'Sc20_S6_R2_HKBS': {'bs_index': -124, 'hkb_index': -157},
'Sc21a_S13_R1_HKBS': {'hkb_index': 537},
'Sc21a_S14_R2_HKBS': {'hkb_index': 752},
'Sc21a_S15_R3_HKBS': {'hkb_index': 438},
'Sc21a_S16_R4_HKBS': {'hkb_index': 224},
'Sc21b_S21_R1_HKBS': {'hkb_index': 368},
'Sc21b_S22_R2_HKBS': {'hkb_index': -333},
'Sc21b_S23_R3_HKBS': {'hkb_index': 136},
'Sc22a_S9_R1_Full': {'hkb_index': 678}}
Notes
-----
This is used for synchronization purpose
"""
path = os.path.join(os.environ['CORMORAN'],'POST-TREATED')
d = pickle.load( open( os.path.join(path,'offset_dictionnary.bin'), "rb" ) )
return d
def _save_offset_dict(self,d):
path = os.path.join(os.environ['CORMORAN'],'POST-TREATED')
d = pickle.dump( d, open( os.path.join(path,'offset_dictionnary.bin'), "wb" ) )
def _save_data_off_dict(self,filename,typ,value):
""" save
- a given "value",
- a serie/run "filename",
- of a given typ (video|hkb|tcr|...)
"""
d = self._load_offset_dict()
try:
d[filename].update({typ:value})
except:
d[filename]={}
d[filename][typ]=value
self._save_offset_dict(d)
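# Illustrative sketch (placeholder path and keys): the offset bookkeeping above
# is a plain pickle round-trip keyed by serie name, then by offset type.
#   import pickle
#   d = {'Sc20_S5_R1_HKBS': {'hkb_index': -148}}
#   with open('offset_dictionnary.bin', 'wb') as f:
#       pickle.dump(d, f)
#   with open('offset_dictionnary.bin', 'rb') as f:
#       d2 = pickle.load(f)
#   d2.setdefault('Sc20_S5_R1_HKBS', {})['video_sec'] = 32.6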
def offset_setter_video(self,a='AP1',b='WristRight',**kwargs):
""" video offset setter
"""
defaults = { 'inverse':True
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fig, axs = plt.subplots(nrows=2,ncols=1)
fig.subplots_adjust(bottom=0.3)
if isinstance(a,str):
ia = self.dHKB[a]
else:
ia = a
a = self.idHKB[a]
if isinstance(b,str):
ib = self.dHKB[b]
else:
ib = b
b = self.idHKB[b]
time = self.thkb
if len(time) == 1:
time=time[0]
sab = self.hkb[a+'-'+b].values
sabt = self.hkb[a+'-'+b].index
hkb = axs[1].plot(sabt,sab,label = a+'-'+b)
axs[1].legend()
try :
init = self.offset[self._filename]['video_sec']
except:
init=time[0]
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(init)
I0 = img_as_ubyte(F0)
axs[0].imshow(F0)
########
# slider
########
slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.05])
sliderx = Slider(slide_xoffset_ax, "video offset", 0, self.hkb.index[-1],
valinit=init, color='#AAAAAA')
# vertc = [(0,-10),(0,-10),(0,10),(0,-10)]
# poly = plt.Polygon(vertc)
# pp = axs[1].add_patch(poly)
def update_x(val):
F0 = vc.get_frame(val)
I0 = img_as_ubyte(F0)
axs[0].imshow(F0)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
# def cursor(val):
# try :
# pp.remove()
# except:
# pass
# vertc = [(sabt[0]+val,min(sab)-10),(sabt[0]+val,min(sab)-10),(sabt[0]+val,max(sab)+10),(sabt[0]+val,max(sab)-10)]
# poly = plt.Polygon(vertc)
# pp = axs[1].add_patch(poly)
# sliderx.on_changed(cursor)
def plus(event):
sliderx.set_val(sliderx.val +0.2)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def minus(event):
sliderx.set_val(sliderx.val -0.2)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def setter(event):
self._save_data_off_dict(self._filename,'video_sec',sliderx.val)
self.offset= self._load_offset_dict()
axp = plt.axes([0.3, 0.05, 0.1, 0.075])
axset = plt.axes([0.5, 0.05, 0.1, 0.075])
axm = plt.axes([0.7, 0.05, 0.1, 0.075])
bp = Button(axp, '<-')
bp.on_clicked(minus)
bset = Button(axset, 'SET offs.')
bset.on_clicked(setter)
bm = Button(axm, '->')
bm.on_clicked(plus)
plt.show()
def offset_setter(self,a='HKB:1',b='HKB:12',techno='',**kwargs):
""" offset setter
"""
defaults = { 'inverse':True
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if plt.isinteractive():
interactive = True
plt.ioff()
else :
interactive = False
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2, left=0.3)
a,ia,bia,subja,techno=self.devmapper(a,techno)
b,ib,bib,subjb,techno=self.devmapper(b,techno)
time = self.tmocap
if len(time.shape) == 2:
time = time[0,:]
try :
init = time[0]#self.offset[self._filename]['hkb_index']
except:
init=time[0]
var = self.getlinkd(ia,ib,techno).values
if kwargs['inverse']:
var = 10*np.log10(1./(var)**2)
gt = ax.plot(time,var)
ab = self.getlink(ia,ib,techno)
sab = ab.values
sabt = ab.index.values
technoval = ax.plot(sabt,sab)
########
# slider
########
slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.02])
sliderx = Slider(slide_xoffset_ax, techno + " offset", -(len(sabt)/16), (len(sabt)/16),
valinit=init, color='#AAAAAA')
slide_yoffset_ax = plt.axes([0.1, 0.10, 0.8, 0.02])
slidery = Slider(slide_yoffset_ax, "gt_yoff", -100, 0,
valinit=0, color='#AAAAAA')
slide_alpha_ax = plt.axes([0.1, 0.05, 0.8, 0.02])
slideralpha = Slider(slide_alpha_ax, "gt_alpha", 0, 60,
valinit=30, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
rtechnoval = np.roll(sab,value)
sliderx.valtext.set_text('{}'.format(value))
technoval[0].set_xdata(sabt)
technoval[0].set_ydata(rtechnoval)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
sliderx.drawon = False
def update_y(val):
yoff = slidery.val
alpha = slideralpha.val
gt[0].set_ydata(alpha*var + yoff)
fig.canvas.draw_idle()
#initpurpose
update_y(5)
slidery.on_changed(update_y)
slideralpha.on_changed(update_y)
def setter(event):
value = int(sliderx.val)
try :
nval = self.offset[self._filename][techno.lower()+'_index'] + value
except :
nval = value
self._save_data_off_dict(self._filename,techno.lower()+'_index',nval)
self.offset= self._load_offset_dict()
ax.set_title('WARNING : please reload the serie to validate the offset change',color='r',weight='bold')
axset = plt.axes([0.0, 0.5, 0.2, 0.05])
bset = Button(axset, 'SET ' +techno+' offs.')
bset.on_clicked(setter)
plt.show()
if interactive :
plt.ion()
# def offset_setter_hkb(self,a='AP1',b='WristRight',**kwargs):
# """ offset setter
# """
# defaults = { 'inverse':True
# }
# for k in defaults:
# if k not in kwargs:
# kwargs[k] = defaults[k]
# if plt.isinteractive():
# interactive = True
# plt.ioff()
# else :
# interactive = False
# fig, ax = plt.subplots()
# fig.subplots_adjust(bottom=0.2, left=0.3)
# a,ia,bia,subja,techno=self.devmapper(a,'HKB')
# b,ib,bib,subjb,techno=self.devmapper(b,'HKB')
# time = self.thkb
# if len(time.shape) == 2:
# time = time[0,:]
# try :
# init = time[0]#self.offset[self._filename]['hkb_index']
# except:
# init=time[0]
# var = self.getlinkd(ia,ib,'HKB').values
# if kwargs['inverse']:
# var = 10*np.log10(1./(var)**2)
# gt = ax.plot(self.B[self.B.keys()[0]].time,var)
# sab = self.hkb[a+'-'+b].values
# sabt = self.hkb[a+'-'+b].index
# hkb = ax.plot(sabt,sab)
# ########
# # slider
# ########
# slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.02])
# sliderx = Slider(slide_xoffset_ax, "hkb offset", -(len(sabt)/16), (len(sabt)/16),
# valinit=init, color='#AAAAAA')
# slide_yoffset_ax = plt.axes([0.1, 0.10, 0.8, 0.02])
# slidery = Slider(slide_yoffset_ax, "gt_yoff", -100, 0,
# valinit=0, color='#AAAAAA')
# slide_alpha_ax = plt.axes([0.1, 0.05, 0.8, 0.02])
# slideralpha = Slider(slide_alpha_ax, "gt_alpha", 0, 10,
# valinit=5, color='#AAAAAA')
# def update_x(val):
# value = int(sliderx.val)
# rhkb = np.roll(sab,value)
# sliderx.valtext.set_text('{}'.format(value))
# hkb[0].set_xdata(sabt)
# hkb[0].set_ydata(rhkb)
# fig.canvas.draw_idle()
# sliderx.on_changed(update_x)
# sliderx.drawon = False
# def update_y(val):
# yoff = slidery.val
# alpha = slideralpha.val
# gt[0].set_ydata(alpha*var + yoff)
# fig.canvas.draw_idle()
# #initpurpose
# update_y(5)
# slidery.on_changed(update_y)
# slideralpha.on_changed(update_y)
# def setter(event):
# value = int(sliderx.val)
# try :
# nval = self.offset[self._filename]['hkb_index'] + value
# except :
# nval = value
# self._save_data_off_dict(self._filename,'hkb_index',nval)
# self.offset= self._load_offset_dict()
# ax.set_title('WARNING : Please Reload serie to Valide offset change',color='r',weight='bold')
# axset = plt.axes([0.0, 0.5, 0.2, 0.05])
# bset = Button(axset, 'SET offs.')
# bset.on_clicked(setter)
# plt.show()
# if interactive:
# plt.ion()
def mtlbsave(self):
""" Matlab format save
S{day}_{serie}
node_name
node_place
node_coord
HKB.{linkname}.tr
HKB.{linkname}.rssi
HKB.{linkname}.td
HKB.{linkname}.dist
HKB.{linkname}.sh
HKB.{linkname}.dsh
TCR.{linkname}.tr
TCR.{linkname}.range
TCR.{linkname}.td
TCR.{linkname}.dist
TCR.{linkname}.sh
"""
key = 'S'+str(self.day)+'_'+str(self.serie)
filemat = key+'.mat'
d = {}
d[key]={}
d[key]['node_name'] = self.dist_nodesmap
d[key]['node_place'] = [ self.devmapper(x)[0] for x in self.dist_nodesmap ]
d[key]['node_coord'] = self.points
for subject in self.interf:
sub = subject.replace(':','')
d[key][sub]=np.mean(self.B[subject].d,axis=1)
if ('HKB' in self.typ.upper()) or ('FULL' in self.typ.upper()):
d[key]['HKB']={}
links = list(self.hkb.columns)
inter,lks = self.compute_visibility(techno='HKB')
for l in links:
ls = l.split('-')
nl = ls[0]+'_'+ls[1]
nl=nl.replace('Jihad','J').replace('Nicolas','N').replace('Eric','E')
d[key]['HKB'][nl] = {}
ix0 = np.where(lks==ls[0])[0]
ix1 = np.where(lks==ls[1])[0]
Ssh = inter[ix0,ix1,:]
Srssi= self.getlink(ls[0],ls[1],techno='HKB')
# get distances between nodes
Sdist = self.getlinkd(ls[0],ls[1],techno='HKB')
dsh = dist_sh2rssi(Sdist,Ssh,15)
# rssi
d[key]['HKB'][nl]['rssi'] = Srssi.values
# dsh
d[key]['HKB'][nl]['dsh'] = dsh
#d['S6'][nl]['rssi_dec'] = np.roll(Srssi.values,-dec)
d[key]['HKB'][nl]['sh'] = Ssh
# time rssi
#d[key]['HKB'][nl]['trh'] = np.array(Srssi.index)
d[key]['trh'] = np.array(Srssi.index)
# distance
d[key]['HKB'][nl]['dist'] = Sdist.values
# time mocap
#d[key]['HKB'][nl]['td'] = np.array(Sdist.index)
d[key]['tm'] = np.array(Sdist.index)
if ('TCR' in self.typ.upper()) or ('FULL' in self.typ.upper()):
d[key]['TCR']={}
links = list(self.tcr.columns)
inter,lks = self.compute_visibility(techno='TCR')
for l in links:
ls = l.split('-')
# to shorten matlab keys, surnames are replaced by their first letter
nl = ls[0]+'_'+ls[1]
nl=nl.replace('Jihad','J').replace('Nicolas','N').replace('Eric','E')
d[key]['TCR'][nl] = {}
ix0 = np.where(lks==ls[0])[0]
ix1 = np.where(lks==ls[1])[0]
# intersection on the link
Ssh = inter[ix0,ix1,:]
Srange= self.getlink(ls[0],ls[1],techno='TCR')
# get distances between nodes
Sdist = self.getlinkd(ls[0],ls[1],techno='TCR')
# rssi
d[key]['TCR'][nl]['range'] = Srange.values
# dsh
#d['S6'][nl]['rssi_dec'] = np.roll(Srssi.values,-dec)
d[key]['TCR'][nl]['sh'] = Ssh
# time rssi
#d[key]['TCR'][nl]['tr'] = np.array(Srange.index)
d[key]['trt'] = np.array(Srange.index)
# distance
d[key]['TCR'][nl]['dist'] = Sdist.values
# time mocap
#d[key]['TCR'][nl]['td'] = np.array(Sdist.index)
d[key]['tm'] = np.array(Sdist.index)
self.matlab = d
io.savemat(filemat,d)
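# Illustrative sketch (file name depends on day/serie; reading back is assumed
# to be done with scipy): the nested dict saved above comes back as matlab structs.
#   from scipy import io
#   m = io.loadmat('S6_1.mat', squeeze_me=True, struct_as_record=False)
#   # e.g. m['S6_1'].node_name, m['S6_1'].trh, m['S6_1'].HKB ...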
def pltvisi(self,a,b,techno='',**kwargs):
""" plot visibility between link a and b
Parameters
----------
color:
fill color
hatch:
hatch type
label_pos: ('top'|'bottom'|'')
position of the label
label_pos_off: float
offset of the label position
label_vis: str
prefix of the label on visible (LOS) segments
label_hide: str
prefix of the label on hidden (NLOS) segments
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
>>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> f,ax = S.pltmob(fig=f,ax=ax)
>>> plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 'fig':[],
'figsize':(10,10),
'ax':[],
'color':'',
'hatch':'//',
'label_pos':'',
'label_pos_off':5,
'label_vis':'V',
'label_hide':'H'
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
aa= ax.axis()
a,ia,nna,subjecta,technoa = self.devmapper(a,techno)
b,ib,nnb,subjectb,technob = self.devmapper(b,techno)
vv,tv,tseg,itseg = self._visiarray(nna,nnb)
# vv.any() : NLOS regions exist
if vv.any():
if kwargs['color']=='':
fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],
fill=False,
hatch=kwargs['hatch'],
fig=fig,ax=ax)
else :
fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],
color=kwargs['color'],
hatch=kwargs['hatch'],
fig=fig,ax=ax)
if kwargs['label_pos']!='':
if kwargs['label_pos'] == 'top':
yposV = aa[3]-kwargs['label_pos_off']+0.5
yposH = aa[3]-kwargs['label_pos_off']-0.5
elif kwargs['label_pos'] == 'bottom':
yposV = aa[2]+kwargs['label_pos_off']+0.5
yposH = aa[2]+kwargs['label_pos_off']+0.5
xposV= tv[tseg.mean(axis=1).astype(int)]
xposH= tv[itseg.mean(axis=1).astype(int)]
[ax.text(x,yposV,kwargs['label_vis']+str(ix+1)) for ix,x in enumerate(xposV)]
[ax.text(x,yposH,kwargs['label_hide']+str(ix+1)) for ix,x in enumerate(xposH)]
return fig,ax
def pltmob(self,**kwargs):
""" plot mobility
Parameters
----------
subject: str
subject to display (if '', take the first one from self.subject)
showvel : boolean
display filtered velocity
velth: float (0.07)
velocity threshold
fo : int (5)
filter order
fw: float (0.02)
0 < fw < 1 (fN <=> 1)
time_offset : int
add time_offset to start later
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
>>> #f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> f,ax = S.pltmob(fig=f,ax=ax)
>>> plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 'subject':'',
'fig':[],
'figsize':(10,10),
'ax':[],
'showvel':False,
'velth':0.07,
'fo':5,
'fw':0.02,
'ylim':(),
'time_offset':0,
'color':'gray',
'hatch':'',
'label_pos':'top',
'label_pos_off':2,
'label_mob':'M',
'label_stat':'S'
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
if kwargs['subject']=='':
subject=self.B.keys()[0]
else:
subject=kwargs['subject']
V=self.B[subject].traj[['vx','vy']].values
Vi=np.sqrt((V[:,0]**2+V[:,1]**2))
f=DF()
f.butter(kwargs['fo'],kwargs['fw'],'lowpass')
Vif=f.filter(Vi)
if kwargs['time_offset']>=0:
zmo = np.zeros(kwargs['time_offset'])
tmp = np.insert(Vif,zmo,0)
Vif = tmp[:len(Vif)]
else:
zmo = np.zeros(-kwargs['time_offset'])
tmp = np.concatenate((Vif,zmo))
Vif = tmp[-kwargs['time_offset']:len(Vif)-kwargs['time_offset']]
if kwargs['showvel']:
fig2 = plt.figure()
ax2=fig2.add_subplot(111)
ax2.plot(self.B[subject].time[:-2],Vif)
ax2.plot(Vif)
cursor2 = Cursor(ax2, useblit=True, color='gray', linewidth=1)
null = np.where(Vif<kwargs['velth'])[0]
unu1 = np.where(np.diff(null)!=1)[0]
unu2 = np.where(np.diff(null[::-1])!=-1)[0]
unu2 = len(null)-unu2
unu = np.concatenate((unu1,unu2))
unu = np.sort(unu)
sunu = unu.shape
if sunu[0]%2:
unu=np.insert(unu,-1,len(null)-1)
sunu = unu.shape
nullr=null[unu].reshape(sunu[0]/2,2)
if kwargs['ylim'] != ():
ylim = kwargs['ylim']
else :
axlim = ax.axis()
ylim = [axlim[2],axlim[3]]
fig , ax =plu.rectplot(self.B[subject].time,nullr,ylim=ylim,
color=kwargs['color'],
hatch=kwargs['hatch'],
fig=fig,ax=ax)
inullr = copy.copy(nullr)
bb = np.insert(inullr[:,1],0,0)
ee = np.hstack((inullr[:,0],null[-1]))
inullr = np.array((bb,ee)).T
# remove last
inullr = inullr[:-1,:]
if kwargs['label_pos']!='':
if kwargs['label_pos'] == 'top':
yposM = ylim[1]-kwargs['label_pos_off']+0.5
yposS = ylim[1]-kwargs['label_pos_off']-0.5
elif kwargs['label_pos'] == 'bottom':
yposM = ylim[0]+kwargs['label_pos_off']+0.5
yposS = ylim[0]+kwargs['label_pos_off']+0.5
xposM= self.B[subject].time[nullr.mean(axis=1).astype(int)]
xposS= self.B[subject].time[inullr.mean(axis=1).astype(int)]
[ax.text(x,yposM,kwargs['label_mob']+str(ix+1),
horizontalalignment='center',
verticalalignment='center')
for ix,x in enumerate(xposM)]
[ax.text(x,yposS,kwargs['label_stat']+str(ix+1),
horizontalalignment='center',
verticalalignment='center')
for ix,x in enumerate(xposS)]
return fig,ax
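# Illustrative sketch (standalone version of the static/mobile segmentation
# above): contiguous runs where a filtered speed stays below the threshold.
#   import numpy as np
#   v = np.array([0., 0., 0.5, 0.6, 0.1, 0., 0., 0.8])
#   below = np.where(v < 0.07)[0]               # -> [0, 1, 5, 6]
#   cuts = np.where(np.diff(below) != 1)[0]
#   starts = np.insert(below[cuts + 1], 0, below[0])
#   stops = np.append(below[cuts], below[-1])
#   segments = np.c_[starts, stops]             # -> [[0, 1], [5, 6]]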
def animhkb(self,a,b,interval=10,save=False):
"""
Parameters
----------
a : node name |number
b : node name | number
save : bool
"""
import matplotlib.animation as animation
x = self.hkb.index
link = a+'-'+b
y = self.hkb[link].values
fig, ax = plt.subplots()
plt.xlim(0,x[-1])
line = [ax.plot(x, y, animated=True)[0]]
def animate(i):
line[0].set_ydata(y[:i])
line[0].set_xdata(x[:i])
return line
ani = animation.FuncAnimation(fig, animate, xrange(1, len(x)),
interval=interval, blit=True)
if save:
ani.save(link+'.mp4')
plt.title(link)
plt.xlabel('time (s)')
plt.ylabel('RSS (dBm)')
plt.show()
def animhkbAP(self,a,AP_list,interval=1,save=False,**kwargs):
"""
Parameters
----------
a : node name
AP_nb=[]
save : bool
Example
-------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> S.animhkbAP('TorsoTopLeft',['AP1','AP2','AP3','AP4'],interval=100,xstart=58,figsize=(20,2))
"""
import matplotlib.animation as animation
defaults = { 'fig':[],
'figsize':(10,10),
'ax':[],
'label':'',
'xstart':0
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
ust = np.where(self.hkb.index>=kwargs['xstart'])[0][0]
x = self.hkb.index[ust:]
links = [l+'-'+a for l in AP_list]
ly = [self.hkb[l].values[ust:] for l in links]
color=['k','b','g','r']
plt.xlim(kwargs['xstart'],x[-1]+3)
line = [ax.plot(x, y, animated=True,
color=color[iy],
label=AP_list[iy]+'-'+kwargs['label'])[0] for iy,y in enumerate(ly)]
def animate(i):
for iy,y in enumerate(ly):
line[iy].set_ydata(y[:i])
line[iy].set_xdata(x[:i])
return line
plt.legend()
plt.xlabel('time (s)')
plt.ylabel('RSS (dBm)')
ani = animation.FuncAnimation(fig, animate, xrange(0, len(x)),
interval=interval, blit=True)
if save:
ani.save(a+'.mp4')
#plt.title(links)
plt.show()
def plot(self,a,b,techno='',t='',**kwargs):
""" ploting
Parameters
----------
a : str | int
name |id
b : str | int
name |id
techno : str (optional)
radio techno
t : float | list (optional)
given time
or [start,stop] time
color : color
distance : boolean (False)
plot distance instead of value
lin : boolean (False)
display linear value instead of dB
sqrtinv : boolean (False)
apply : "sqrt (1/ dataset)"
xoffset : float (0)
add an offset on x axis
yoffset : float (1|1e3|1e6)
add an offset on y axis
title : boolean (True)
display title
shortlabel : boolean (True)
enable short labelling
fontsize : int (18)
font size
returnlines : boolean
if True return the matplotlib ploted lines
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plot('AP1','TorsoTopLeft',techno='HKB')
>>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> #f,ax = S.pltmob(fig=f,ax=ax)
>>> #plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 'fig':[],
'ax':[],
'figsize':(6,4),
'color':'g',
'distance':False,
'lin':False,
'xoffset':0,
'yoffset': 1e6,
'sqrtinv':False,
'title':True,
'shortlabel':True,
'fontsize':18,
'returnlines':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
a,ia,bia,subja,techno=self.devmapper(a,techno)
b,ib,bib,subjb,techno=self.devmapper(b,techno)
###create a short labeling
if kwargs['shortlabel']:
#find uppercase position
uu = np.nonzero([l.isupper() or l.isdigit() for l in a])[0]
#create string from list
labela = ''.join([a[i] for i in uu])
uu = np.nonzero([l.isupper() or l.isdigit() for l in b])[0]
#create string from list
labelb = ''.join([b[i] for i in uu])
label = labela +'-'+labelb
else:
label = a+'-'+b
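# Short-label sketch: keeping only capitals and digits turns e.g.
# 'TorsoTopLeft'-'AP1' into 'TTL-AP1'.
#   ''.join([c for c in 'TorsoTopLeft' if c.isupper() or c.isdigit()])  # 'TTL'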
if kwargs['distance']:
label = 'dist ' + label
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
# get dataframe
if not kwargs['distance']:
df = self.getlink(a,b,techno,t)
title = 'Received Power between ' + label
ylabel = 'Received Power dBm'
else :
df = self.getlinkd(a,b,techno,t)
title = 'Distance between ' + label
ylabel = 'distance (m)'
#post processing on dataframe
if kwargs['lin']:
df = 10**(df/10) * kwargs['yoffset']
if kwargs['sqrtinv']:
df = np.sqrt(1./df)
ylabel = u'$ (mW)^{-1/2} linear scale$'
lines = df.plot(ax=ax,color=kwargs['color'],label=label)
# Managing labelling
if kwargs['title']:
ax.set_title(label=title,fontsize=kwargs['fontsize'])
if kwargs['lin']:
if kwargs['yoffset']==1:
ylabel = 'mW'
if kwargs['yoffset']==1e3:
ylabel = u'$\mu$W'
if kwargs['yoffset']==1e6:
ylabel = u'nW'
ax.set_ylabel(ylabel)
# if kwargs['data']==True:
# #ax.plot(self.thkb[0],self.rssi[ia,ib,:])
# #ax.plot(self.thkb[0],self.rssi[ib,ia,:])
# sab = self.hkb[a+'-'+b]
# if not(kwargs['dB']):
# sab = 10**(sab/10) * kwargs['yoffset']
# if kwargs['distance']:
# sab = np.sqrt(1/sab)
# if kwargs['reciprocal']:
# sba = 10**(sba/10 ) * kwargs['yoffset']
# sba = np.sqrt(1/sba)
# sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],label=label,xlim=(t0,t1))
# if kwargs['reciprocal']:
# sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],label=label)
# #title = 'Received Power ' + self.title1
# if kwargs['dis_title']:
# #title = self.title1+kwargs['tit']
# title = kwargs['tit']
# ax.set_title(label=title,fontsize=kwargs['fontsize'])
# if not kwargs['distance']:
# if kwargs['dB']:
# ax.set_ylabel('Received Power dBm')
# else:
# if kwargs['yoffset']==1:
# ax.set_ylabel('mW')
# if kwargs['yoffset']==1e3:
# ax.set_ylabel(u'$\micro$W')
# if kwargs['yoffset']==1e6:
# ax.set_ylabel(u'nW')
# else:
# ax.set_ylabel(u'$\prop (mW)^{-1/2} linear scale$')
# if kwargs['reciprocal']==True:
# # if kwargs['data']==True:
# # ax2=fig.add_subplot(212)
# r = self.hkb[a+'-'+b][self.hkb[a+'-'+b]!=0]- self.hkb[b+'-'+a][self.hkb[b+'-'+a]!=0]
# r[t0:t1].plot(ax=ax2)
# ax2.set_title('Reciprocity offset',fontsize=kwargs['fontsize'])
if not kwargs['returnlines']:
return fig,ax
else:
return fig,ax,lines
def plthkb(self,a,b,techno='HKB',**kwargs):
""" plot Hikob devices
DEPRECATED
Parameters
----------
a : node name |number
b : node name | number
t0 : start time
t1 : stop time
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
>>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> f,ax = S.pltmob(fig=f,ax=ax)
>>> plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 't0':0,
't1':-1,
'fig':[],
'ax':[],
'figsize':(8,8),
'xoffset':0,
'yoffset': 1e6,
'reciprocal':False,
'dB':True,
'data':True,
'colorab':'g',
'colorba':'b',
'distance':False,
'fontsize':18,
'shortlabel':True,
'dis_title':True,
'xlim':(),
'tit':''
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
t0 =kwargs['t0']
t1 =kwargs['t1']
if t1 ==-1:
try:
t1=self.thkb[0][-1]
except:
t1=self.thkb[-1]
a,ia,bia,subja,technoa=self.devmapper(a,techno)
b,ib,bib,subjb,technob=self.devmapper(b,techno)
if kwargs['shortlabel']:
#find uppercase position
uu = np.nonzero([l.isupper() or l.isdigit() for l in a])[0]
#create string from list
labela = ''.join([a[i] for i in uu])
uu = np.nonzero([l.isupper() or l.isdigit() for l in b])[0]
#create string from list
labelb = ''.join([b[i] for i in uu])
label = labela +'-'+labelb
else:
label = a+'-'+b
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
if kwargs['reciprocal']:
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else :
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
if kwargs['data']==True:
#ax.plot(self.thkb[0],self.rssi[ia,ib,:])
#ax.plot(self.thkb[0],self.rssi[ib,ia,:])
sab = self.hkb[a+'-'+b]
sba = self.hkb[b+'-'+a]
if not(kwargs['dB']):
sab = 10**(sab/10) * kwargs['yoffset']
if kwargs['distance']:
sab = np.sqrt(1/sab)
if kwargs['reciprocal']:
sba = 10**(sba/10 ) * kwargs['yoffset']
sba = np.sqrt(1/sba)
sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],label=label,xlim=(t0,t1))
if kwargs['reciprocal']:
sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],label=label)
#title = 'Received Power ' + self.title1
if kwargs['dis_title']:
#title = self.title1+kwargs['tit']
title = kwargs['tit']
ax.set_title(label=title,fontsize=kwargs['fontsize'])
if not kwargs['distance']:
if kwargs['dB']:
ax.set_ylabel('Received Power dBm')
else:
if kwargs['yoffset']==1:
ax.set_ylabel('mW')
if kwargs['yoffset']==1e3:
ax.set_ylabel(u'$\mu$W')
if kwargs['yoffset']==1e6:
ax.set_ylabel(u'nW')
else:
ax.set_ylabel(u'$\propto (mW)^{-1/2}$ linear scale')
if kwargs['reciprocal']==True:
# if kwargs['data']==True:
# ax2=fig.add_subplot(212)
r = self.hkb[a+'-'+b][self.hkb[a+'-'+b]!=0]- self.hkb[b+'-'+a][self.hkb[b+'-'+a]!=0]
r[t0:t1].plot(ax=ax2)
ax2.set_title('Reciprocity offset',fontsize=kwargs['fontsize'])
return fig,ax
def plttcr(self,a,b,**kwargs):
""" plot TCR devices
Parameters
----------
a : node name |number
b : node name | number
t0 : start time
t1 : stop time
"""
defaults = { 't0':0,
't1':-1,
'fig':[],
'ax':[],
'figsize':(8,8),
'data':True,
'colorab':'g',
'colorba':'b',
'linestyle':'default',
'inverse':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
t0 =kwargs['t0']
t1 =kwargs['t1']
if t1 ==-1:
t1=self.ttcr[-1]
if isinstance(a,str):
ia = self.dTCR[a]
else:
ia = a
a = self.idTCR[a]
if isinstance(b,str):
ib = self.dTCR[b]
else:
ib = b
b = self.idTCR[b]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else:
fig = kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax=kwargs['ax']
if kwargs['data']==True:
#ax.plot(self.thkb[0],self.rssi[ia,ib,:])
#ax.plot(self.thkb[0],self.rssi[ib,ia,:])
if kwargs['inverse']:
sab = 1./(self.tcr[a+'-'+b])**2
sba = 1./(self.tcr[b+'-'+a])**2
else:
sab = self.tcr[a+'-'+b]
sba = self.tcr[b+'-'+a]
sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],marker='o',linestyle=kwargs['linestyle'])
sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],marker='o',linestyle=kwargs['linestyle'])
ax.set_title(a+'-'+b)
return fig,ax
def pltgt(self,a,b,**kwargs):
""" plt ground truth
Parameters
----------
t0
t1
fig
ax
figsize: tuple
linestyle
inverse : boolean (False)
display 1/distance instead of distance
log : boolean
display log of distance instead of distance
gamma : float (1.)
multiplication factor for log : gamma*log(distance)
this can be used to fit RSS
mode : string
'HKB' | 'TCR' | 'FULL'
visi : boolean,
display visibility
color: string color ('k'|'m'|'g'),
color to display the visibility area
hatch : string hatch type ('//')
hatch type to hatch visibility area
fontsize: int
title fontsize
Example
-------
>>> from pylayers.measures.cormoran import *
>>> S=CorSer(6)
>>> S.pltgt('AP1','TorsoTopLeft')
"""
defaults = { 'subject':'',
't0':0,
't1':-1,
'fig':[],
'ax':[],
'figsize':(8,8),
'linestyle':'default',
'inverse':False,
'log':True,
'gamma':-40,
'mode':'HKB',
'visi': True,
'fontsize': 14,
'color':'k',
'hatch':''
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
#t0 =kwargs.pop('t0')
#t1 =kwargs.pop('t1')
#if t1 ==-1:
#t1=self.thkb[-1]
# t1=self.ttcr[-1]
label = a+'-'+b
mode = kwargs.pop('mode')
inverse = kwargs.pop('inverse')
log = kwargs.pop('log')
gamma = kwargs.pop('gamma')
visibility = kwargs.pop('visi')
fontsize = kwargs.pop('fontsize')
hatch = kwargs.pop('hatch')
subject = kwargs.pop('subject')
if subject=='':
subject=self.B.keys()[0]
else:
subject=subject
if kwargs['fig']==[]:
figsize = kwargs.pop('figsize')
kwargs.pop('fig')
fig = plt.figure(figsize=figsize)
else:
kwargs.pop('figsize')
fig = kwargs.pop('fig')
if kwargs['ax'] ==[]:
kwargs.pop('ax')
ax = fig.add_subplot(111)
else :
ax=kwargs.pop('ax')
if mode == 'HKB' or mode == 'FULL':
if isinstance(a,str):
iahk = self.dHKB[a]
else:
iahk = a
a = self.idHKB[a]
if isinstance(b,str):
ibhk = self.dHKB[b]
else:
ibhk = b
b = self.idHKB[b]
var = self.getlink(iahk,ibhk,'HKB')
#var = U.values
#time = U.index
#pdb.set_trace()
if inverse:
var = 1./(var)
ax.set_ylabel(u'$m^{-2}$',fontsize=fontsize)
if log :
#var = gamma*10*np.log10(var)
var = 20*np.log10(var)+gamma
ax.set_ylabel(u'$- 20 \log_{10}(d)'+str(gamma)+'$ (dB)',fontsize=fontsize)
plt.ylim(-65,-40)
else:
ax.set_ylabel(u'meters',fontsize=fontsize)
if log :
var = gamma*10*np.log10(var)+gamma
ax.set_ylabel(u'$10log_{10}m^{-2}$',fontsize=fontsize)
#ax.plot(self.B[subject].time,var,label=label,**kwargs)
var.plot()
#
# TCR |Full
#
if mode == 'TCR' or mode == 'FULL':
if isinstance(a,str):
iatcr = self.dTCR[a]
else:
iatcr = a
a = self.idTCR[a]
if isinstance(b,str):
ibtcr = self.dTCR[b]
else:
ibtcr = b
b = self.idTCR[b]
var = self.getlink(iatcr,ibtcr,'TCR').values
#if inverse:
# var = 1./(var)**2
# if log :
# var = gamma*10*np.log10(var)
#else:
# if log :
# var = gamma*10*np.log10(var)
#pdb.set_trace()
#ax.plot(self.B[subject].time,var,**kwargs)
ax.plot(self.B[subject].ttcr,var,**kwargs)
if visibility:
aa= ax.axis()
vv,tv,tseg,itseg = self._visiarray(a,b)
# vv.any() : NLOS regions exist
if vv.any():
fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],color=kwargs['color'],hatch=hatch,fig=fig,ax=ax)
# for t in tseg:
#axs[cptax].plot(visi.index.values,visi.values,'r')
#if inverse:
# ax.set_title(u'Motion Capture Ground Truth : inverse of squared distance',fontsize=fontsize+1)
#else:
# ax.set_title('Motion Capture Ground Truth : evolution of distance (m)',fontsize=fontsize+1)
ax.set_xlabel('Time (s)',fontsize=fontsize)
plt.tight_layout()
return fig, ax
def pltlk(self,a,b,**kwargs):
""" plot links
Parameters
----------
a : string
node a name
b : string
node b name
display: list
techno to be displayed
figsize
t0: float
time start
t1 : float
time stop
colhk: plt.color
color of hk curve
colhk2:plt.color
color of hk curve2 (if reciprocal)
linestylehk:
linestyle hk
coltcr:
color tcr curve
coltcr2:
color of tcr curve2 (if reciprocal)
linestyletcr:
linestyle tcr
colgt:
color ground truth
inversegt:
invert ground truth
loggt: bool
apply a log10 factor to ground truth
gammagt:
apply a gamma factor to ground truth (if loggt)
fontsize:
font size of legend
visi:
display visibility indicator
axs :
list of matplotlib axes
Example
-------
>>> from pylayers.measures.cormoran import *
>>> S=CorSer(6)
>>> S.pltlk('AP1','TorsoTopLeft')
"""
defaults = { 'display':[],
'figsize':(8,8),
't0':0,
't1':-1,
'colhk':'g',
'colhk2':'b',
'linestylehk':'default',
'coltcr':'g',
'coltcr2':'b',
'linestyletcr':'step',
'colgt': 'k',
'inversegt':True,
'loggt':True,
'gammagt':-40,
'fontsize':14,
'visi':True,
'axs' :[],
'gt':True,
'tit':''
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
display = kwargs.pop('display')
if not isinstance(display,list):
display=[display]
if display == []:
if ('tcr' in dir(self)) and ('hkb' in dir(self)):
display.append('FULL')
elif 'tcr' in dir(self):
display.append('TCR')
elif 'hkb' in dir(self):
display.append('HKB')
display = [t.upper() for t in display]
if 'FULL' in display:
ld = 2
elif 'TCR' in display or 'HKB' in display:
ld = 2
#Axes management
if kwargs['axs'] == []:
kwargs.pop('axs')
fig,axs = plt.subplots(nrows=ld,ncols=1,figsize=kwargs['figsize'],sharex=True)
else :
fig =plt.gcf()
axs = kwargs.pop('axs')
cptax= 0
# HKB plot
if 'HKB' in display or 'FULL' in display:
if ('HKB' in self.typ.upper()) or ('FULL' in self.typ.upper()):
if isinstance(a,str):
iahk = self.dHKB[a]
else :
raise AttributeError('in self.pltlk, nodes id must be a string')
if isinstance(b,str):
ibhk = self.dHKB[b]
else :
raise AttributeError('in self.pltlk, nodes id must be a string')
else :
raise AttributeError('HK not available for the given scenario')
kwargs['fig']=fig
kwargs['ax']=axs[cptax]
kwargs['colorab']=kwargs.pop('colhk')
kwargs['colorba']=kwargs.pop('colhk2')
kwargs['linestyle']=kwargs.pop('linestylehk')
kwargs['tit']=kwargs.pop('tit')
fig,axs[cptax]=self.plthkb(a,b,reciprocal=False,**kwargs)
cptax+=1
else :
kwargs.pop('colhk')
kwargs.pop('colhk2')
kwargs.pop('linestylehk')
#TCR plot
if 'TCR' in display or 'FULL' in display:
if ('TCR' in self.typ.upper()) or ('FULL' in self.typ.upper()):
if isinstance(a,str):
iatcr = self.dTCR[a]
else :
raise AttributeError('in self.pltlk, nodes id must be a string')
if isinstance(b,str):
ibtcr = self.dTCR[b]
else :
raise AttributeError('in self.pltlk, nodes id must be a string')
else :
raise AttributeError('TCR not available for the given scenario')
kwargs['fig']=fig
kwargs['ax']=axs[cptax]
kwargs['colorab']=kwargs.pop('coltcr')
kwargs['colorba']=kwargs.pop('coltcr2')
kwargs['linestyle']=kwargs.pop('linestyletcr')
tcrlink = a+'-'+b
#plot only if link exist
if tcrlink in self.tcr:
fig,axs[cptax]=self.plttcr(a,b,**kwargs)
else :
kwargs.pop('coltcr')
kwargs.pop('coltcr2')
kwargs.pop('linestyletcr')
#cptax+=1
#
# Ground Truth
#
#
# HKB |Full
#
if kwargs.pop('gt'):
kwargs['color'] = kwargs.pop('colgt')
kwargs.pop('colorab')
kwargs.pop('colorba')
kwargs['ax']=axs[cptax]
kwargs['inverse']=kwargs.pop('inversegt')
kwargs['log']=kwargs.pop('loggt')
kwargs['gamma']=kwargs.pop('gammagt')
kwargs.pop('tit')
if 'HKB' in display or 'FULL' in display:
kwargs['mode']= 'HKB'
fig,axs[cptax] = self.pltgt(a,b,**kwargs)
elif 'TCR' in display or 'FULL' in display:
kwargs['mode']= 'TCR'
fig,axs[cptax] = self.pltgt(a,b,**kwargs)
return fig,axs
# aa = axs[cptax].axis()
#
# calculates visibility and display NLOS region
# as a yellow patch over the shadowed region
#
def showpattern(self,a,techno='HKB',**kwargs):
""" show pattern configuation for a given link and frame
Parameters
----------
a : int
link index
technoa : string
'HKB'|'TCR'|'BS'
technob
default 'HKB'|'TCR'|'BS'
phi : float
antenna elevation in rad
fig :
ax :
t : float
phi : float
pi/2
ap : boolean
"""
defaults = { 'fig':[],
'ax':[],
't':0,
'phi':np.pi/2.,
'ap':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig'] == []:
fig=plt.figure()
else :
fig = kwargs['fig']
if kwargs['ax'] == []:
ax=fig.add_subplot(111)
else :
ax = kwargs['ax']
# display nodes
#
#
#
a,ia,ba,subjecta,techno = self.devmapper(a,techno)
pa = self.getdevp(a,techno=techno,t=kwargs['t']).values
if len(pa.shape) >1:
pa=pa[0]
ax.plot(pa[0],pa[1],'ob')
ax.text(pa[0],pa[1],ba)
if subjecta != '':
self.B[subjecta].settopos(t=kwargs['t'])
self.B[subjecta].dev[ba]['ant'].eval()
xa,ya,z,sa,v = self.B[subjecta].dev[ba]['ant']._computemesh(po=pa,T=self.B[subjecta].acs[ba],minr=0.01,maxr=0.1,ilog=False)
p2 = np.where(self.B[subjecta].dev[ba]['ant'].phi<=kwargs['phi'])[0][-1]
# ax.plot(xa[:,p2],ya[:,p2])
ax.plot(xa[p2,:],ya[p2,:])
else:
self.din[ba]['ant'].eval()
xa,ya,z,sa,v = self.din[ba]['ant']._computemesh(po=self.din[ba]['p'],T=self.din[ba]['T'],minr=0.01,maxr=0.1,ilog=False)
p2 = np.where(self.din[ba]['ant'].phi<=kwargs['phi'])[0][-1]
ax.plot(xa[:,p2],ya[:,p2])
return fig,ax
def showlink(self,a='AP1',b='BackCenter',technoa='HKB',technob='HKB',**kwargs):
""" show link configuation for a given frame
Parameters
----------
a : int
link index
b : int
link index
technoa : string
default 'HKB'|'TCR'|'BS'
technob
default 'HKB'|'TCR'|'BS'
phi : float
antenna elevation in rad
"""
defaults = { 'fig':[],
'ax':[],
't':0,
'phi':np.pi/2.,
'ap':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig'] == []:
fig=plt.figure()
else :
fig = kwargs['fig']
if kwargs['ax'] == []:
ax=fig.add_subplot(111)
else :
ax = kwargs['ax']
# display nodes
fig,ax=self.showpattern(a=a,techno=technoa,fig=fig,ax=ax)
fig,ax=self.showpattern(a=b,techno=technob,fig=fig,ax=ax)
plt.axis('equal')
p1 = self.din['HKB:1']['p']
p2 = self.din['HKB:2']['p']
p3 = self.din['HKB:3']['p']
p4 = self.din['HKB:4']['p']
plt.plot(p1[0],p1[1],'og')
plt.plot(p2[0],p2[1],'ob')
plt.plot(p3[0],p3[1],'or')
plt.plot(p4[0],p4[1],'ok')
plt.axis('equal')
# if A.ndim==2:
# plt.plot(A[iframe,0],A[iframe,1],'ob')
# plt.text(A[iframe,0],A[iframe,1],a)
# else:
# plt.plot(A[0],A[1],'or')
# #plt.text(A[0],A[1],a)
# if B.ndim==2:
# plt.plot(B[iframe,0],B[iframe,1],style)
# plt.text(B[iframe,0]+0.1,B[iframe,1]+0.1,b)
# else:
# plt.plot(B[0],B[1],'ob')
# plt.text(B[0],B[1],b)
# plt.xlim(-6,6)
# plt.ylim(-5,5)
# self.B[subjecta].settopos(t=t)
# self.B[subjectb].settopos(t=t)
#
# # display body
# #pc = self.B.d[:,2,iframe] + self.B.pg[:,iframe].T
# pc0 = self.B[subjecta].d[:,0,iframe] + self.B[subjecta].pg[:,iframe].T
# pc1 = self.B[subjecta].d[:,1,iframe] + self.B[subjecta].pg[:,iframe].T
# pc15 = self.B[subjecta].d[:,15,iframe] + self.B[subjecta].pg[:,iframe].T
# #plt.plot(pc0[0],pc0[1],'og')
# #plt.text(pc0[0]+0.1,pc0[1],str(iframe))
# #plt.plot(pc1[0],pc1[1],'og')
# #plt.plot(pc15[0],pc15[1],'og')
# #ci00 = plt.Circle((pc0[0],pc0[1]),self.B[subjecta].sl[0,2],color='green',alpha=0.6)
# #ci01 = plt.Circle((pc1[0],pc1[1]),self.B[subjecta].sl[0,2],color='green',alpha=0.1)
# #ci100 = plt.Circle((pc0[0],pc0[1]),self.B[subjecta].sl[10,2],color='red',alpha=0.1)
# ci1015 = plt.Circle((pc15[0],pc15[1]),self.B[subjecta].sl[10,2],color='green',alpha=0.5)
# plt.axis('equal')
# ax = plt.gca()
# ax.add_patch(ci1015)
# #ax.add_patch(ci01)
# #ax.add_patch(ci100)
# #ax.add_patch(ci1015)
# #its = self.B[subjecta].intersectBody(A[iframe,:],B[iframe,:],topos=False,frameId=iframe)
# #x.set_title('frameId :'+str(iframe)+' '+str(its.T))
def visidev(self,a,b,technoa='HKB',technob='HKB',dsf=10):
""" get link visibility status
Returns
-------
visi : pandas Series
0 : LOS
1 : NLOS
"""
A,B = self.getlinkp(a,b,technoa=technoa,technob=technob)
A=A.values
B=B.values
aa,ia,ba,subjecta,technoa= self.devmapper(a,technoa)
ab,ib,bb,subjectb,technob= self.devmapper(b,technob)
if 'AP' not in aa:
Nframe = A.shape[0]
if 'AP' not in ab:
Nframe = B.shape[0]
else:
Nframe = len(self.B[self.B.keys()[0]].time)
iframe = np.arange(0,Nframe-1,dsf)
tvisi = []
#
# A : Nframe x 3
# B : Nframe x 3
# B.pg : 3 x Nframe
#
if subjecta != '':
subject = subjecta
elif subjectb != '':
subject = subjectb
else :
raise AttributeError('Visibility can only be determined on a body for now')
if self.B[subject].centered:
A = A-self.B[subject].pg.T
B = B-self.B[subject].pg.T
for k in iframe:
if len(np.shape(A))<2:
A=A[np.newaxis,:]*np.ones((len(B),3))
if len(np.shape(B))<2:
B=B[np.newaxis,:]*np.ones((len(A),3))
its = self.B[subject].intersectBody(A[k,:],B[k,:],topos=False,frameId=k)
tvisi.append(its.any())
visi = pd.Series(tvisi,index=iframe/100.)
#return(visi,iframe)
return(visi)
def visidev2(self,a,b,technoa='HKB',technob='HKB',trange=[]):
""" get link visibility status
Returns
-------
trange : nd array
time range
visi : pandas Series
0 : LOS
1 : NLOS
"""
A,B = self.getlinkp(a,b,technoa,technob)
A=A.values
B=B.values
aa,ia,ba,subjecta,technoa= self.devmapper(a,technoa)
ab,ib,bb,subjectb,technob= self.devmapper(b,technob)
if 'AP' not in a:
Nframe = A.shape[0]
if 'AP' not in b:
Nframe = B.shape[0]
# iframe = np.arange(0,Nframe-1,dsf)
tvisi = []
#
# A : Nframe x 3
# B : Nframe x 3
# B.pg : 3 x Nframe
#
if subjecta != '':
subject = subjecta
elif subjectb != '':
subject = subjectb
else :
raise AttributeError('Visibility can only be determined on a body for now')
if self.B[subject].centered:
A = A-self.B[subject].pg.T
B = B-self.B[subject].pg.T
for t in trange:
fid = self.B[subject].posvel(self.B[subjecta].traj,t)[0]
its = self.B[subject].intersectBody(A[fid,:],B[fid,:],topos=False,frameId=fid)
tvisi.append(its.any())
visi = pd.Series(tvisi,index=trange)
#return(visi,iframe)
return(visi)
def _visiarray(self,a,b,technoa='HKB',technob='HKB'):
""" create entries for plu.rectplot
"""
visi = self.visidev(a,b,technoa=technoa,technob=technob)
tv = visi.index.values
vv = visi.values.astype(int)
if (not(vv.all()) and vv.any()):
df = vv[1:]-vv[0:-1]
um = np.where(df==1)[0]
ud = np.where(df==-1)[0]
lum = len(um)
lud = len(ud)
#
# impose same size and starting
# on leading edge um and ending on
# falling edge ud
#
if lum==lud:
if ud[0]<um[0]:
um = np.hstack((np.array([0]),um))
ud = np.hstack((ud,np.array([len(vv)-1])))
else:
if ((lum<lud) & (vv[0]==1)):
um = np.hstack((np.array([0]),um))
if ((lud<lum) & (vv[len(vv)-1]==1)):
ud = np.hstack((ud,np.array([len(vv)-1])))
tseg = np.array(zip(um,ud))
#else:
# tseg = np.array(zip(ud,um))
else:
if vv.all():
tseg = np.array(zip(np.array([0]),np.array([len(vv)-1])))
else :
tseg = np.array([[0,0]])
itseg = copy.copy(tseg)
bb = np.insert(itseg[:,1],0,0)
ee = np.hstack((itseg[:,0],len(vv)))
itseg = np.array((bb,ee)).T
# bb = np.hstack((bb,len(vv)))
return vv,tv,tseg,itseg
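# Illustrative sketch (edge detection behind tseg above): rising and falling
# edges of a 0/1 visibility signal are found with a first-order difference.
#   import numpy as np
#   vv = np.array([0, 0, 1, 1, 1, 0, 1, 1])
#   d = np.diff(vv)
#   rising = np.where(d == 1)[0]                # -> [1, 5]
#   falling = np.where(d == -1)[0]              # -> [4]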
# def _computedevpdf(self):
# """ create a timestamped data frame
# with all positions of devices
# """
# t=self.B.traj.time()
# pos = np.empty((len(t),12,3))
# for ik,k in enumerate(t):
# self.B.settopos(t=k)
# pos[ik,:,:]=self.B.getlinkp()
# df=[]
# for d in range(pos.shape[1]):
# df_tmp=pd.DataFrame(pos[:,d,:],columns=['x','y','z'],index=t)
# df_tmp['id']=self.B.dev.keys()[d]
# try :
# df = pd.concat([df,df_tmp])
# except:
# df = df_tmp
# df = df.sort_index()
# cols=['id','x','y','z']
# self.devdf=df[cols]
def _computedevpdf(self):
""" create a timestamped data frame
with positions of all devices
"""
if not isinstance(self.B,dict):
B={self.subject[0]:self.B}
else :
B=self.B
for b in B:
if 'dev' in dir(B[b]):
dev = B[b].dev.keys()
udev=[B[b].dev[d]['uc3d'] for d in dev]
postmp = np.array([np.mean(B[b]._f[:,u,:],axis=1) for u in udev])
pos = postmp.swapaxes(0,1)
t = B[b].time
for d in range(len(dev)):
df_tmp=pd.DataFrame(pos[:,d,:],columns=['x','y','z'],index=t)
df_tmp[['vx','vy','vz']]=df_tmp.diff()/(t[1]-t[0])
df_tmp['v']=np.sqrt(np.sum(df_tmp[['vx','vy','vz']]**2,axis=1))
df_tmp[['ax','ay','az']]=df_tmp[['vx','vy','vz']].diff()/(t[1]-t[0])
df_tmp['a']=np.sqrt(np.sum(df_tmp[['ax','ay','az']]**2,axis=1))
df_tmp['id'] = list(B[b].dev.keys())[d]
df_tmp['subject']=B[b].name
try :
df = | pd.concat([df,df_tmp]) | pandas.concat |
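# Illustrative sketch (core of the kinematics columns above): successive
# DataFrame.diff calls over a fixed time step give speed and acceleration.
#   import numpy as np
#   import pandas as pd
#   t = np.arange(0, 1, 0.01)
#   pos = pd.DataFrame({'x': t, 'y': t**2, 'z': 0.}, index=t)
#   dt = t[1] - t[0]
#   vel = pos.diff() / dt
#   v = np.sqrt((vel**2).sum(axis=1))
#   acc = vel.diff() / dt
#   a = np.sqrt((acc**2).sum(axis=1))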
from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import logging
from collections import namedtuple
from tqdm import tqdm
logger = logging.getLogger(__name__)
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
"""
This is a Basal-Bolus Controller that is typically practiced by a Type-1
Diabetes patient. The performance of this controller can serve as a
baseline when developing a more advanced controller.
"""
def __init__(self, target=140):
self.quest = pd.read_csv(CONTROL_QUEST)
self.patient_params = | pd.read_csv(PATIENT_PARA_FILE) | pandas.read_csv |
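# Illustrative sketch (simplified dosing rule, not necessarily the library's
# exact policy): a basal-bolus controller delivers a constant basal rate plus a
# meal bolus computed from the carb ratio (CR) and a correction term from the
# correction factor (CF) toward the target glucose.
#   def bolus(carbs, bg, target, cr, cf):
#       meal = carbs / cr if carbs > 0 else 0.
#       correction = max(bg - target, 0.) / cf if carbs > 0 else 0.
#       return meal + correction
#   bolus(50, 180, 140, 10., 30.)   # 5 U meal + ~1.33 U correction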
#!/usr/bin/env python
# coding: utf-8
# # Assignment 1: tier of the bot-lane duo
# ## Libraries and data loading
# In[937]:
import requests
import json
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
import warnings
warnings.filterwarnings(action='ignore')
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import math
# In[4]:
url='*****************************************************'
# In[ ]:
adc_sup_pick_red
# In[ ]:
lol_data=data.text
lol_data=lol_data.replace('\n', ',\n')
lol_data='['+lol_data+']'
lol_data=lol_data.replace(']},\n]',']}\n]')
# In[ ]:
f = open("data.txt", 'w')
f.write(lol_data)
f.close()
# In[ ]:
lol_data=json.loads(lol_data)
# In[ ]:
output_df=json_normalize(lol_data)
# In[790]:
sample=output_df
sample.reset_index(inplace=True)
del sample['index']
del sample['Unnamed: 0']
sample
# ## Data preprocessing
# ### teams
# #### Brief information on bans and objectives
# In[756]:
def array_on_duplicate_keys(ordered_pairs):
d = {}
for k, v in ordered_pairs:
if k in d:
if type(d[k]) is list:
d[k].append(v)
else:
d[k] = [d[k],v]
else:
d[k] = v
return d
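# Illustrative sketch: json.loads keeps only the last duplicate key by default;
# the object_pairs_hook above collects every occurrence into a list instead.
#   import json
#   s = '{"championid": 1, "championid": 2, "win": "Win"}'
#   json.loads(s)                                         # {'championid': 2, ...}
#   json.loads(s, object_pairs_hook=array_on_duplicate_keys)
#   # -> {'championid': [1, 2], 'win': 'Win'}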
# In[757]:
teams_output = pd.DataFrame(columns = ['firstdragon', 'firstinhibitor', 'pickturn', 'championid', 'baronkills',
'firstriftherald', 'firstbaron', 'riftheraldkills', 'firstblood',
'teamid', 'firsttower', 'vilemawkills', 'inhibitorkills', 'towerkills',
'dominionvictoryscore', 'win', 'dragonkills'])
def split_list(a_list):
half = len(a_list)//2
return a_list[:][:half], a_list[:][half:]
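# Illustrative sketch: each parsed row holds both teams' values concatenated,
# so the first half of every list is the blue side and the second half the red.
#   split_list([1, 2, 3, 4, 5, 6])   # -> ([1, 2, 3], [4, 5, 6])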
# In[758]:
for i in range(len(sample)):
test=sample['teams'][i]
test=test.replace("'", "\"").replace('[{','').replace('}]','').replace('}, {', ', ').replace(' "bans":','').replace('False','\"False\"').replace('True','\"True\"')
test='[{' + test+ '}]'
test=json.loads(test, object_pairs_hook=array_on_duplicate_keys)
test=json_normalize(test)
teams_output=pd.concat([teams_output,test])
teams_output.reset_index(inplace=True)
del teams_output['index']
teams_output.head()
# In[759]:
a=[]
b=[]
teams_output_blue=pd.DataFrame()
teams_output_red=pd.DataFrame()
for i in range(teams_output.shape[0]):
for j in range(teams_output.shape[1]):
A,B=split_list(teams_output.iloc[i][j])
a.append(A)
b.append(B)
teams_output_blue=pd.concat([teams_output_blue,pd.DataFrame(pd.Series(a)).transpose()])
teams_output_red=pd.concat([teams_output_red,pd.DataFrame( | pd.Series(b) | pandas.Series |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the keys as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
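# e.g. (illustrative): for a 2-D frame, _axify(df, [0, 1], 1) returns
# (slice(None), [0, 1]), i.e. a full slice on every axis except the keyed one.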
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
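# compare getattr(obj, method1)[key1] against the result of method2/key2 for
# every requested object kind (series/frame/panel), index dtype and axis;
# 'fails' lists exception types that count as an expected failure.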
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, then ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# exceeding the slice bounds is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fall back
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fall back
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure assignments are consistent
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when using
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic indexes are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow non-string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, | DataFrame([[1]], index=['foo'], columns=[1]) | pandas.core.api.DataFrame |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from typing import Dict, List
import numpy as np
import pandas as pd
from collections import Counter
from src.compress import compress
# logger
from src.logger_setting.my_logger import get_logger
from src.setting import setting
LOGGER = get_logger()
def groupby_calc(df):
df['esn'] = df['esn'].astype('str')
df = df.groupby(['esn'])
return df
def calc_total(series):
series = series.values
count = 0
for d in range(len(series)):
if d < len(series) - 1:
if pd.isna(series[d]) or pd.isna(series[d + 1]):
continue
if float(series[d]) <= float(series[d + 1]):
count += float(series[d + 1]) - float(series[d])
else:
count += float(series[d + 1])
return count
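# Worked example (illustrative, not part of the original pipeline): for cumulative readings
# [10, 40, 5, 25] the counter is assumed to have reset between 40 and 5, so calc_total
# returns (40 - 10) + 5 + (25 - 5) = 55.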
def is_active(series):
series = calc_total(series)
if float(series) / setting.mb > 10:
return 1
else:
return 0
def get_max(series):
if series:
return np.max(series)
else:
return setting.INVALID_VALUE
def get_min(series):
if series:
return np.min(series)
else:
return setting.INVALID_VALUE
def get_avg(values, counts):
count = sum(counts) if type(counts) == list else counts
if count == 0:
return setting.INVALID_VALUE
else:
return sum(values) / count if type(values) == list else values / count
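# Note on the day files produced by merge_day_data below: the Avg* columns actually store
# per-day sums and the Cnt* columns store sample counts, so get_avg_max_min recomputes the
# average as sum / count while filtering out the INVALID_VALUE placeholder.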
def get_avg_max_min(df, avg_name, max_name, min_name, counts):
avg_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[avg_name].values))
sum_value = get_sum(avg_list)
cnt = get_sum(list(df[counts].values))
avg = sum_value / cnt if cnt != 0 else setting.INVALID_VALUE
max_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[max_name].values))
max_value = get_max(max_list)
min_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[min_name].values))
min_value = get_min(min_list)
return {avg_name: avg,
max_name: max_value,
min_name: min_value}
def get_sum(series):
if series:
return np.sum(series)
else:
return setting.INVALID_VALUE
def get_std(series):
if series:
return np.std(series)
else:
return setting.INVALID_VALUE
def get_all_day():
all_day_file = compress.get_all_csv_file(os.path.join(setting.data_path, 'extractData'))
day_list = []
for file in all_day_file:
day_list.append(os.path.split(file)[1].split("\\")[-1].split('_')[0])
return list(set(day_list))
def merge_day_data(day_dict: Dict[str, List[str]]):
for day in day_dict.keys():
file_list: List[str] = day_dict.get(day)
df = pd.concat(pd.read_csv(file, error_bad_lines=False, index_col=False) for file in file_list)
df.columns = setting.parameter_json["extract_data_columns"]
df = df.sort_values('collectTime', ascending=True)
# Turn -9999 into NaN; values that were originally empty already became NaN when the file was read, so every empty value and every -9999 ends up as NaN
df = df.replace(setting.INVALID_VALUE, np.nan)
grouped = groupby_calc(df).agg(
MaxRSRP=pd.NamedAgg(column='RSRP', aggfunc=max),
MinRSRP=pd.NamedAgg(column='RSRP', aggfunc=min),
AvgRSRP=pd.NamedAgg(column='RSRP', aggfunc=sum),
CntRSRP=pd.NamedAgg(column='RSRP', aggfunc="count"),
MaxCQI=pd.NamedAgg(column='CQI', aggfunc=max),
MinCQI=pd.NamedAgg(column='CQI', aggfunc=min),
AvgCQI=pd.NamedAgg(column='CQI', aggfunc=sum),
CntCQI=pd.NamedAgg(column='CQI', aggfunc="count"),
MaxRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=max),
MinRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=min),
AvgRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=sum),
CntRSRQ=pd.NamedAgg(column='RSRQ', aggfunc="count"),
MaxRSSI=pd.NamedAgg(column='RSSI', aggfunc=max),
MinRSSI=pd.NamedAgg(column='RSSI', aggfunc=min),
AvgRSSI=pd.NamedAgg(column='RSSI', aggfunc=sum),
CntRSSI=pd.NamedAgg(column='RSSI', aggfunc="count"),
MaxSINR=pd.NamedAgg(column='SINR', aggfunc=max),
MinSINR=pd.NamedAgg(column='SINR', aggfunc=min),
AvgSINR=pd.NamedAgg(column='SINR', aggfunc=sum),
CntSINR=pd.NamedAgg(column='SINR', aggfunc="count"),
TotalDownload=pd.NamedAgg(column='TotalDownload', aggfunc=calc_total),
TotalUpload=pd.NamedAgg(column='TotalUpload', aggfunc=calc_total),
TotalConnectTime=pd.NamedAgg(column='TotalConnectTime', aggfunc=calc_total),
ModelName=pd.NamedAgg(column='ModelName', aggfunc=lambda x: x.iloc[-1]),
IMSI=pd.NamedAgg(column='IMSI', aggfunc=lambda x: x.iloc[-1]),
IMEI=pd.NamedAgg(column='IMEI', aggfunc=lambda x: x.iloc[-1]),
MSISDN=pd.NamedAgg(column='MSISDN', aggfunc=lambda x: x.iloc[-1]),
isActive=pd.NamedAgg(column='TotalDownload', aggfunc=is_active),
AvgDlThroughput=pd.NamedAgg(column='MaxDLThroughput', aggfunc=sum),
CntDlThroughput=pd.NamedAgg(column='MaxDLThroughput', aggfunc="count"),
AvgUlThroughput=pd.NamedAgg(column='MaxULThroughput', aggfunc=sum),
CntUlThroughput=pd.NamedAgg(column='MaxULThroughput', aggfunc="count"),
WiFiUserQty=pd.NamedAgg(column='WiFiUserQty', aggfunc=sum),
CntWiFiUserQty= | pd.NamedAgg(column='WiFiUserQty', aggfunc="count") | pandas.NamedAgg |
# -*- coding: utf-8 -*-
"""METRICS.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1L78bqwUCI5fD90ZFudHX_ZooObj8rdMz
"""
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
from scipy import stats
import pandas as pd
import multiprocessing
import random
from matplotlib import rc
from sklearn.preprocessing import scale
from sklearn.preprocessing import OneHotEncoder
import timeit
import itertools
import seaborn as sns
from collections import Counter
import operator
import plotly.offline as pyoff
import plotly.graph_objs as go
from google.colab import drive
drive.mount('/content/drive')
"""## LOAD EVENTOS"""
txt_event = '/content/drive/My Drive/TFM/03_DATASETS/eventos_2.rpt'
widths=[10,39,12,16,12,39,20,10,41,16,50,39,17,18,39,41,41,41,41,41,30,39]
dfevent = pd.read_fwf(txt_event, widths=widths, header=1, index_col=None, index=True)
rowcl,colcl = dfevent.shape
dfevent=dfevent[0:(rowcl-3)]
new_header = ['TipoEvento','CodigoEvento','FechaEvento','UsuarioEvento','HoraEvento','ClienteEvento','CodigoPostalEvento','PaísEvento','RepresentanteEvento','TipoPortesEvento','FormaPagoEvento','PlazoPagoEvento','SkuArticuloEvento','TipoArticuloEvento','FamiliaArticuloEvento','SubfamiliaArticuloEvento','CantidadArticuloEvento','AlmacenArticuloEvento','TarifaArticuloEvento','DescuentoArticuloEvento','MotivoEvento','CosteEvento']
dfevent.columns = new_header
dfevent
#keep only the events inside the selected date range (2009-01-07 to 2019-12-31)
dfevent = dfevent[(dfevent['FechaEvento'] >= '2009-01-07') &
(dfevent['FechaEvento'] <= '2019-12-31')]
#drop events that are quotes (type OFERTA)
indexNames = dfevent[ dfevent['TipoEvento'] == 'OFERTA' ].index
# Delete these row indexes from dataFrame
dfevent.drop(indexNames , inplace=True)
events= dfevent.sort_values(by=['FechaEvento'])
events['ClienteEvento'] = events.ClienteEvento.map(lambda x: str(x)[:-2])
events = events[['ClienteEvento','FechaEvento','CodigoEvento','CantidadArticuloEvento','TarifaArticuloEvento','DescuentoArticuloEvento']]
events.info()
# Load the dataset CLIENTES
txt_client = '/content/drive/My Drive/TFM/03_DATASETS/clientes.rpt'
widths = [40, 31, 16, 21, 12, 51, 17, 40, 40, 15, 50]
dfclient = pd.read_fwf(txt_client, widths=widths, header=1, index_col=None, index=True)
rowcl,colcl = dfclient.shape
dfclient=dfclient[0:(rowcl-3)]
new_header = ['CodigoCliente','NombreCliente','NIFCliente','CodigoPostalCliente','PaisCliente','SegmentoCliente','FechaAltaCliente','RepresentanteCliente','AreaCliente','MercadoCliente','GrupoDescuentoCliente']
dfclient.columns = new_header
clientes = dfclient
id_nombre = clientes[['CodigoCliente','NombreCliente','PaisCliente', 'SegmentoCliente']]
events = events.merge(id_nombre,left_on='ClienteEvento', right_on='CodigoCliente')
events
"""## KNOW EMUCA METRICS
### Yearly revenues
"""
#converting the type of Invoice Date Field from string to datetime.
events['FechaEvento'] = pd.to_datetime(events['FechaEvento'])
#creating an InvoiceYear field for ease of reporting and visualization
events['InvoiceYear'] = events['FechaEvento'].map(lambda date: date.year)
#calculate Revenue for each row and create a new dataframe with InvoiceYear - Revenue columns
events['Revenue'] = events["CantidadArticuloEvento"] * events["TarifaArticuloEvento"] * (100 - events["DescuentoArticuloEvento"])/100
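#e.g. 10 units at a list price of 5.00 with a 20% discount -> 10 * 5.00 * (100 - 20) / 100 = 40.00 of revenue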
tx_revenue = events.groupby(['InvoiceYear'])['Revenue'].sum().reset_index()
tx_revenue
#X and Y axis inputs for Plotly graph. We use Scatter for line graphs
plot_data = [
go.Scatter(
x=tx_revenue['InvoiceYear'],
y=tx_revenue['Revenue'],
)
]
plot_layout = go.Layout(
xaxis={"type": "category"},
title='Yearly Revenue'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
"""### Yearly Revenue Growth Rate"""
#using pct_change() function to see monthly percentage change
tx_revenue['YearlyGrowth'] = tx_revenue['Revenue'].pct_change()
#showing first 5 rows
tx_revenue.head()
#visualization - line graph
plot_data = [
go.Scatter(
x=tx_revenue.query("InvoiceYear <= 2020")['InvoiceYear'],
y=tx_revenue.query("InvoiceYear <= 2020")['YearlyGrowth'],
)
]
plot_layout = go.Layout(
xaxis={"type": "category"},
title='Yearly Growth Rate'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
"""### Yearly active customers"""
#optional filter (left commented out): keep España customers only
#tx_es = tx_data.query("PaisEvento=='ES'").reset_index(drop=True)
#creating yearly active customers dataframe by counting unique Customer IDs
tx_yearly_active = events.groupby('InvoiceYear')['ClienteEvento'].nunique().reset_index()
#print the dataframe
tx_yearly_active
#plotting the output
plot_data = [
go.Bar(
x=tx_yearly_active['InvoiceYear'],
y=tx_yearly_active['ClienteEvento'],
)
]
plot_layout = go.Layout(
xaxis={"type": "category"},
title='Yearly Active Customers'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
"""### Yearly orders count"""
#create a new dataframe for the no. of orders by using the quantity field
tx_yearly_sales = events.groupby('InvoiceYear')['CantidadArticuloEvento'].sum().reset_index()
#print the dataframe
tx_yearly_sales
#plot
plot_data = [
go.Bar(
x=tx_yearly_sales['InvoiceYear'],
y=tx_yearly_sales['CantidadArticuloEvento'],
)
]
plot_layout = go.Layout(
xaxis={"type": "category"},
title='Yearly Total Number of Orders'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
"""### Average Revenue per Order"""
# create a new dataframe for average revenue by taking the mean of it
tx_yearly_order_avg = events.groupby('InvoiceYear')['Revenue'].mean().reset_index()
#print the dataframe
tx_yearly_order_avg
#plot the bar chart
plot_data = [
go.Bar(
x=tx_yearly_order_avg['InvoiceYear'],
y=tx_yearly_order_avg['Revenue'],
)
]
plot_layout = go.Layout(
xaxis={"type": "category"},
title='Yearly Order Average'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
"""### New Customer ratio"""
#a customer is considered new when their first purchase falls within the selected time window
#create a dataframe containing ClienteEvento and first purchase date
tx_min_purchase = events.groupby('ClienteEvento').FechaEvento.min().reset_index()
tx_min_purchase.columns = ['ClienteEvento','MinPurchaseDate']
tx_min_purchase['MinPurchaseYear'] = tx_min_purchase['MinPurchaseDate'].map(lambda date: date.year)
#merge first purchase date column to our main dataframe (events)
events = pd.merge(events, tx_min_purchase, on='ClienteEvento')
events.head()
#create a column called User Type and assign Existing
#if User's First Purchase Year Month before the selected Invoice Year Month
events['UserType'] = 'New'
events.loc[events['InvoiceYear']>events['MinPurchaseYear'],'UserType'] = 'Existing'
#calculate the Revenue per month for each user type
tx_user_type_revenue = events.groupby(['InvoiceYear','UserType'])['Revenue'].sum().reset_index()
#filtering the dates and plot the result
plot_data = [
go.Scatter(
x=tx_user_type_revenue.query("UserType == 'Existing'")['InvoiceYear'],
y=tx_user_type_revenue.query("UserType == 'Existing'")['Revenue'],
name = 'Existing'
),
go.Scatter(
x=tx_user_type_revenue.query("UserType == 'New'")['InvoiceYear'],
y=tx_user_type_revenue.query("UserType == 'New'")['Revenue'],
name = 'New'
)
]
plot_layout = go.Layout(
xaxis={"type": "category"},
title='New vs Existing'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
#LET'S HAVE A BETTER VIEW OF OUR NEW CUSTOMER RATIO
#create a dataframe that shows the new user ratio - we also need to drop NA values (the first year has no existing customers)
tx_user_ratio = events.query("UserType == 'New'").groupby(['InvoiceYear'])['ClienteEvento'].nunique()/events.query("UserType == 'Existing'").groupby(['InvoiceYear'])['ClienteEvento'].nunique()
tx_user_ratio = tx_user_ratio.reset_index()
tx_user_ratio = tx_user_ratio.dropna()
#print the dataframe
tx_user_ratio
#plot the result
plot_data = [
go.Bar(
x=tx_user_ratio.query("InvoiceYear>=2009 and InvoiceYear<=2020")['InvoiceYear'],
y=tx_user_ratio.query("InvoiceYear>=2009 and InvoiceYear<=2020")['ClienteEvento'],
)
]
plot_layout = go.Layout(
xaxis={"type": "category"},
title='New Customer Ratio'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
"""### Yearly retention rate
Yearly Retention Rate = Retained Customers From Prev. year/Active Customers Total
"""
#identify which users are active by looking at their revenue per year
tx_user_purchase = events.groupby(['ClienteEvento','InvoiceYear'])['Revenue'].sum().reset_index()
#create retention matrix with crosstab
tx_retention = pd.crosstab(tx_user_purchase['ClienteEvento'], tx_user_purchase['InvoiceYear']).reset_index()
tx_retention.head()
#create an array of dictionaries which keeps the Retained & Total User counts for each year
months = tx_retention.columns[2:]
retention_array = []
for i in range(len(months)-1):
retention_data = {}
selected_month = months[i+1]
prev_month = months[i]
retention_data['InvoiceYear'] = int(selected_month)
retention_data['TotalUserCount'] = tx_retention[selected_month].sum()
retention_data['RetainedUserCount'] = tx_retention[(tx_retention[selected_month]>0) & (tx_retention[prev_month]>0)][selected_month].sum()
retention_array.append(retention_data)
#convert the array to dataframe and calculate Retention Rate
tx_retention = pd.DataFrame(retention_array)
tx_retention['RetentionRate'] = tx_retention['RetainedUserCount']/tx_retention['TotalUserCount']
#plot the retention rate graph
plot_data = [
go.Scatter(
x=tx_retention.query("InvoiceYear<=2020")['InvoiceYear'],
y=tx_retention.query("InvoiceYear<=2020")['RetentionRate'],
name="organic"
)
]
plot_layout = go.Layout(
xaxis={"type": "category"},
title='Yearly Retention Rate'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
"""## PREDICTING NEXT PURCHASE DAY"""
import xgboost as xgb
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.cluster import KMeans
events= dfevent.sort_values(by=['FechaEvento'])
events['ClienteEvento'] = events.ClienteEvento.map(lambda x: str(x)[:-2])
events = events.merge(id_nombre,left_on='ClienteEvento', right_on='CodigoCliente')
events = events[['ClienteEvento','FechaEvento','CodigoEvento','CantidadArticuloEvento','TarifaArticuloEvento','DescuentoArticuloEvento']]
events['FechaEvento'] = pd.to_datetime(events['FechaEvento'])
events['ClienteEvento'] = events['ClienteEvento'].astype(int)
events
"""We use 12 months of behavioral data to predict customers’ first purchase date in the next 6 months. If there is no purchase, we will predict that too. Let’s assume our cut off date is 1st july 2019 and split the data:"""
tx_6m = events[(events.FechaEvento < '2019-07-01') & (events.FechaEvento >= '2018-06-01')].reset_index(drop=True)
tx_next = events[(events.FechaEvento >= '2019-07-01') & (events.FechaEvento < '2020-01-01')].reset_index(drop=True)
"""tx_6m represents the 12 months performance whereas we will use tx_next for the find out the days between the last purchase date in tx_6m and the first one in tx_next."""
#Also, we will create a dataframe called tx_user to hold a user-level feature set for the prediction model:
tx_user = pd.DataFrame(tx_6m['ClienteEvento'].unique())
tx_user.columns = ['ClienteEvento']
#create a dataframe with customer id and first purchase date in tx_next
tx_next_first_purchase = tx_next.groupby('ClienteEvento').FechaEvento.min().reset_index()
tx_next_first_purchase.columns = ['ClienteEvento','MinPurchaseDate']
#create a dataframe with customer id and last purchase date in tx_6m
tx_last_purchase = tx_6m.groupby('ClienteEvento').FechaEvento.max().reset_index()
tx_last_purchase.columns = ['ClienteEvento','MaxPurchaseDate']
#merge two dataframes
tx_purchase_dates = pd.merge(tx_last_purchase,tx_next_first_purchase,on='ClienteEvento',how='left')
#calculate the time difference in days:
tx_purchase_dates['NextPurchaseDay'] = (tx_purchase_dates['MinPurchaseDate'] - tx_purchase_dates['MaxPurchaseDate']).dt.days
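#e.g. a last purchase on 2019-06-20 and a first purchase in the next window on 2019-07-05 give NextPurchaseDay = 15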
#merge with tx_user
tx_user = pd.merge(tx_user, tx_purchase_dates[['ClienteEvento','NextPurchaseDay']],on='ClienteEvento',how='left')
#print tx_user
tx_user.head()
#fill NA values with 999. We have NaN values because those customers haven't made any purchase in the next window; filling with 999 makes them easy to identify later.
tx_user = tx_user.fillna(999)
tx_user.head()
"""### Feature Engineering
For this project, we have selected our feature candidates like below:
- RFM scores & clusters
- Days between the last three purchases
- Mean & standard deviation of the difference between purchases in days
"""
#get max purchase date for Recency and create a dataframe
tx_max_purchase = tx_6m.groupby('ClienteEvento').FechaEvento.max().reset_index()
tx_max_purchase.columns = ['ClienteEvento','MaxPurchaseDate']
#find the recency in days and add it to tx_user
tx_max_purchase['Recency'] = (tx_max_purchase['MaxPurchaseDate'].max() - tx_max_purchase['MaxPurchaseDate']).dt.days
tx_user = | pd.merge(tx_user, tx_max_purchase[['ClienteEvento','Recency']], on='ClienteEvento') | pandas.merge |
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
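# Note: the two GARCH helpers below depend on arch_model from the `arch` package; the
# corresponding import near the top of this file is commented out and must be re-enabled
# before calling them.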
def garch_fit_predict_volatility(returns_series, N=10000):
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# keep only the midprice column for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
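# Realized volatility of a wap series: the square root of the sum of squared log returns.
# np.diff already yields the n-1 returns, and the extra [1:] slice below skips the first
# computed return before squaring and summing.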
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # Abs to take
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
return [wap_imbalance_mean,price_spread_mean,bid_spread_mean,ask_spread_mean,total_volume_mean,volume_imbalance_mean, wap_imbalance_sum,price_spread_sum,bid_spread_sum,ask_spread_sum,total_volume_sum,volume_imbalance_sum, wap_imbalance_std,price_spread_std,bid_spread_std,ask_spread_std,total_volume_std,volume_imbalance_std, wap_imbalance_max,price_spread_max,bid_spread_max,ask_spread_max,total_volume_max,volume_imbalance_max, wap_imbalance_min,price_spread_min,bid_spread_min,ask_spread_min,total_volume_min,volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# all time_ids present for this stock
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_5'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_5'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
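# Sketch of the one-hot stock encoding appended above (values are illustrative):
#   all_stocks_ids = np.array([0, 1, 43])
#   np.eye(3)[np.where(all_stocks_ids == 43)[0], :]   # -> [[0., 0., 1.]]
# i.e. every row_id 'stock-time' gets the identity-matrix row matching the
# position of its stock_id in all_stocks_ids.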
def computeFeatures_july(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
        # Unique time_ids for this stock (used to build the per-window row_id frames below)
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_5'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_5'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
        # Unique time_ids for this stock (used to build the per-window row_id frames below)
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap2(book_stock)
book_stock['mid_price'] = calc_wap3(book_stock)
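        # NOTE: 'wap4' and 'mid_price' reuse calc_wap2/calc_wap3, so rv4/rv5 below
        # duplicate rv2/rv3; dedicated calc_wap4/mid-price helpers (if defined
        # elsewhere) were presumably intended here.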
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
            df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4': 'rv4_300', 'mid_price': 'rv5_300'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_300'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_300'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_300'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv4_300'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
            df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4': 'rv4_480', 'mid_price': 'rv5_480'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_480'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_480'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_480'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv4_480'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent_noCode(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
        # Unique time_ids for this stock (used to build the per-window row_id frames below)
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap2(book_stock)
book_stock['mid_price'] = calc_wap3(book_stock)
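        # NOTE: 'wap4' and 'mid_price' reuse calc_wap2/calc_wap3, so rv4/rv5 below
        # duplicate rv2/rv3; dedicated calc_wap4/mid-price helpers (if defined
        # elsewhere) were presumably intended here.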
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
            df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4': 'rv4_300', 'mid_price': 'rv5_300'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_300'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_300'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_300'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv4_300'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
            df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4': 'rv4_480', 'mid_price': 'rv5_480'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_480'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_480'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_480'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv4_480'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = | pd.DataFrame([0],columns=['wap_imbalance5']) | pandas.DataFrame |
import Functions
import pandas as pd
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date'])
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
logic = {'Open*': 'first',
'High': 'max',
'Low': 'min',
'Close**': 'last',
'Volume': 'sum',
'Market Cap': 'last'
}
offset = pd.offsets.timedelta(days=-6)
dfWMR = dfWMR.resample('W', loffset=offset).apply(logic)
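# Weekly OHLCV aggregation: 'W' buckets are labelled by their Sunday, and the
# -6 day loffset (older pandas spelling of this shift) relabels each bucket with
# its Monday so the market and sentiment frames below share the same weekly index.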
for column in dfWMR.columns:
dfWMR = dfWMR.drop(columns=column)
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
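# All of the frames above start from the same empty weekly skeleton (dfWMR with
# its columns dropped); each per-coin series is merged into them inside the loop
# below so the coins stay aligned on that index.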
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date'])
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
logic = {'Open*': 'first',
'High': 'max',
'Low': 'min',
'Close**': 'last',
'Volume': 'sum',
'Market Cap': 'last'
}
offset = pd.offsets.timedelta(days=-6)
dfMarket = dfMarket.resample('W', loffset=offset).apply(logic)
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom3']
dfMOM3 = dfMOM3.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom5']
dfMOM5 = dfMOM5.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom7']
dfMOM7 = dfMOM7.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom14']
dfMOM14 = dfMOM14.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Market Cap']
dfMarketCap = dfMarketCap.merge(dfTemp, how='left', left_index=True, right_index=True)
dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=',')
if coin_list[i] == 'BTC':
# dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=';')
dfSentiment = pd.read_csv('Data/All_Merged.csv', index_col=0, sep=',')
dfSentiment = dfSentiment[['positive_comment', 'neutral_comment', 'negative_comment']]
dfSentiment['Date'] = dfSentiment.index
dfSentiment['Date'] = pd.to_datetime(dfSentiment['Date'])
dfSentiment.index = pd.DatetimeIndex(dfSentiment['Date'])
logic = {'positive_comment': 'sum',
'neutral_comment': 'sum',
'negative_comment': 'sum'
}
offset = pd.offsets.timedelta(days=-6)
dfSentiment = dfSentiment.resample('W', loffset=offset).apply(logic)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['positive_comment']
dfPositive = dfPositive.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['negative_comment']
dfNegative = dfNegative.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
from numpy import nan
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from numpy.testing import assert_allclose
from pvlib.location import Location
from pvlib import tracking
SINGLEAXIS_COL_ORDER = ['tracker_theta', 'aoi',
'surface_azimuth', 'surface_tilt']
def test_solar_noon():
index = pd.date_range(start='20180701T1200', freq='1s', periods=1)
apparent_zenith = pd.Series([10], index=index)
apparent_azimuth = pd.Series([180], index=index)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': 0, 'aoi': 10,
'surface_azimuth': 90, 'surface_tilt': 0},
index=index, dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_scalars():
apparent_zenith = 10
apparent_azimuth = 180
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
assert isinstance(tracker_data, dict)
expect = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0}
for k, v in expect.items():
assert_allclose(tracker_data[k], v)
def test_arrays():
apparent_zenith = np.array([10])
apparent_azimuth = np.array([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
assert isinstance(tracker_data, dict)
expect = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0}
for k, v in expect.items():
assert_allclose(tracker_data[k], v)
def test_nans():
apparent_zenith = np.array([10, np.nan, 10])
apparent_azimuth = np.array([180, 180, np.nan])
with np.errstate(invalid='ignore'):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = {'tracker_theta': np.array([0, nan, nan]),
'aoi': np.array([10, nan, nan]),
'surface_azimuth': np.array([90, nan, nan]),
'surface_tilt': np.array([0, nan, nan])}
for k, v in expect.items():
assert_allclose(tracker_data[k], v)
# repeat with Series because nans can differ
apparent_zenith = pd.Series(apparent_zenith)
apparent_azimuth = pd.Series(apparent_azimuth)
with np.errstate(invalid='ignore'):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame(np.array(
[[ 0., 10., 90., 0.],
[nan, nan, nan, nan],
[nan, nan, nan, nan]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(tracker_data, expect)
def test_arrays_multi():
apparent_zenith = np.array([[10, 10], [10, 10]])
apparent_azimuth = np.array([[180, 180], [180, 180]])
# singleaxis should fail for num dim > 1
with pytest.raises(ValueError):
tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
# uncomment if we ever get singleaxis to support num dim > 1 arrays
# assert isinstance(tracker_data, dict)
# expect = {'tracker_theta': np.full_like(apparent_zenith, 0),
# 'aoi': np.full_like(apparent_zenith, 10),
# 'surface_azimuth': np.full_like(apparent_zenith, 90),
# 'surface_tilt': np.full_like(apparent_zenith, 0)}
# for k, v in expect.items():
# assert_allclose(tracker_data[k], v)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': -60, 'aoi': 0,
'surface_azimuth': 90, 'surface_tilt': 60},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=False,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 80, 'tracker_theta': 80},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
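# Sanity check for the backtracking numbers above (not part of the test suite):
# with ideal rotation theta = 80 deg and gcr = 2/7, the usual correction
#   wc = degrees(arccos(min(cos(theta) / gcr, 1))) ~= 52.57
# gives tracker_theta = theta - wc ~= 27.43, matching the expected frame.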
def test_axis_tilt():
apparent_zenith = pd.Series([30])
apparent_azimuth = | pd.Series([135]) | pandas.Series |
import pandas as pd
from jinja2 import Template
from code.config import (CSV_FOLDER, COLUMNS_IN_CHECK_RESULT, TABLE_CSV_FILE,
FIELD_CSV_FILE, CONCEPT_CSV_FILE, MAPPING_CSV_FILE, CHECK_LIST_CSV_FILE)
from collections import defaultdict
from IPython.display import display, HTML
def load_check_description(rule_code=None):
"""Extract the csv file containing the descriptions of checks
Parameters
----------
rule_code: str or list
contains all the codes to be checked
Returns
-------
pd.DataFrame
"""
check_df = pd.read_csv(CSV_FOLDER/CHECK_LIST_CSV_FILE, dtype='object')
if rule_code:
valid_rule_code = extract_valid_codes_to_run(check_df, rule_code)
if valid_rule_code:
make_header(f"Running the following checks {str(valid_rule_code)}")
check_df = filter_data_by_rule(check_df, valid_rule_code)
else:
make_header("Code(s) invalid so running all checks...")
return check_df
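# Hypothetical usage (the rule codes are illustrative, not taken from the CSV):
#   checks = load_check_description()                      # run every check
#   checks = load_check_description('DC-001')              # one rule, if valid
#   checks = load_check_description(['DC-001', 'DC-002'])  # several rules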
def make_header(message):
print("#####################################################")
print(message)
print("#####################################################\n")
return True
def is_rule_valid(check_df, code):
return code in check_df['rule'].values
def extract_valid_codes_to_run(check_df, rule_code):
# valid_rule_code = []
if not isinstance(rule_code, list):
rule_code = [rule_code]
return [code for code in rule_code if is_rule_valid(check_df, code)]
def filter_data_by_rule(check_df, rule_code):
"""Filter specific check rules by using code
Parameters
----------
check_df: pd.DataFrame
contains all the checks
rule_code: str or list
contains the codes to be checked
Returns
-------
pd.DataFrame
"""
if not isinstance(rule_code, list):
rule_code = [rule_code]
return check_df[check_df['rule'].isin(rule_code)]
def load_tables_for_check():
"""Load all the csv files for check
Returns
-------
dict
"""
check_dict = defaultdict()
list_of_files = [TABLE_CSV_FILE, FIELD_CSV_FILE, CONCEPT_CSV_FILE, MAPPING_CSV_FILE]
list_of_levels = ['Table', 'Field', 'Concept', 'Mapping']
for level, filename in zip(list_of_levels, list_of_files):
check_dict[level]= pd.read_csv(CSV_FOLDER/filename, dtype='object')
return check_dict
def form_field_param_from_row(row, field):
    return row[field] if field in row and row[field] is not None else ''
def get_list_of_common_columns_for_merge(check_df, results_df):
"""Extract common columns from two dataframes
Parameters
----------
check_df : pd.DataFrame
results_df: pd.DataFrame
Returns
-------
list
"""
return [col for col in check_df if col in results_df]
def format_cols_to_string(df):
"""Format all columns (except for some) to string
Parameters
----------
df: pd.DataFrame
Returns
-------
pd.DataFrame
"""
df = df.copy()
for col in df:
if col == 'n_row_violation':
df[col] = df[col].astype(int)
continue
if df[col].dtype == 'float':
df[col] = df[col].astype( | pd.Int64Dtype() | pandas.Int64Dtype |
from rdkit import Chem
from smdt.descriptors import AtomProperty
import numpy
import pandas as pd
def _CalculateGearyAutocorrelation(mol, lag=1, propertylabel='m'):
"""
    **Internal use only**
Calculation of Geary autocorrelation descriptors based on
different property weights.
"""
Natom = mol.GetNumAtoms()
prolist = []
for i in mol.GetAtoms():
temp = AtomProperty.GetRelativeAtomicProperty(i.GetSymbol(), propertyname=propertylabel)
prolist.append(temp)
aveweight = sum(prolist) / Natom
tempp = [numpy.square(x - aveweight) for x in prolist]
GetDistanceMatrix = Chem.GetDistanceMatrix(mol)
res = 0.0
index = 0
for i in range(Natom):
for j in range(Natom):
if GetDistanceMatrix[i, j] == lag:
atom1 = mol.GetAtomWithIdx(i)
atom2 = mol.GetAtomWithIdx(j)
temp1 = AtomProperty.GetRelativeAtomicProperty(element=atom1.GetSymbol(), propertyname=propertylabel)
temp2 = AtomProperty.GetRelativeAtomicProperty(element=atom2.GetSymbol(), propertyname=propertylabel)
res = res + numpy.square(temp1 - temp2)
index = index + 1
else:
res = res + 0.0
if sum(tempp) == 0 or index == 0:
result = 0
else:
result = (res / index / 2) / (sum(tempp) / (Natom - 1))
return round(result, 3)
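# The value returned above follows Geary's C at topological lag d:
#   C(d) = [ (1 / (2 * N_d)) * sum_{d_ij = d} (w_i - w_j)^2 ]
#          / [ (1 / (N - 1)) * sum_i (w_i - w_mean)^2 ]
# where w_i are the relative atomic properties and N_d the number of ordered atom
# pairs at distance d (res / index / 2 and sum(tempp) / (Natom - 1) in the code).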
def CalculateGearyAutoMass(mol):
"""
Calculation of Geary autocorrelation descriptors based on
carbon-scaled atomic mass.
"""
res = {}
for i in range(8):
res['GATSm' + str(i + 1)] = _CalculateGearyAutocorrelation(mol, lag=i + 1, propertylabel='m')
return res
def CalculateGearyAutoVolume(mol):
"""
Calculation of Geary autocorrelation descriptors based on
    carbon-scaled atomic van der Waals volume.
"""
res = {}
for i in range(8):
res['GATSv' + str(i + 1)] = _CalculateGearyAutocorrelation(mol, lag=i + 1, propertylabel='V')
return res
def CalculateGearyAutoElectronegativity(mol):
"""
Calculation of Geary autocorrelation descriptors based on
carbon-scaled atomic Sanderson electronegativity.
"""
res = {}
for i in range(8):
res['GATSe' + str(i + 1)] = _CalculateGearyAutocorrelation(mol, lag=i + 1, propertylabel='En')
return res
def CalculateGearyAutoPolarizability(mol):
"""
Calculation of Geary autocorrelation descriptors based on
carbon-scaled atomic polarizability.
"""
res = {}
for i in range(8):
res['GATSp' + str(i + 1)] = _CalculateGearyAutocorrelation(mol, lag=i + 1, propertylabel='alapha')
return res
def GetGearyAutoofMol(mol):
"""
Calcualate all Geary autocorrelation descriptors.
"""
res = {}
res.update(CalculateGearyAutoMass(mol))
res.update(CalculateGearyAutoVolume(mol))
res.update(CalculateGearyAutoElectronegativity(mol))
res.update(CalculateGearyAutoPolarizability(mol))
return res
def getGearyAuto(df_x):
"""
Calculates all Geary Auto-correlation descriptors for the dataset
Parameters:
df_x: pandas.DataFrame
SMILES DataFrame
Returns:
geary_descriptors: pandas.DataFrame
Geary Auto-correlation Descriptors DataFrame
"""
labels = []
for i in range(8):
labels.append('GATSm' + str(i + 1))
labels.append('GATSv' + str(i + 1))
labels.append('GATSe' + str(i + 1))
labels.append('GATSp' + str(i + 1))
r = {}
for key in labels:
r[key] = []
for m in df_x['SMILES']:
mol = Chem.MolFromSmiles(m)
res = GetGearyAutoofMol(mol)
for key in labels:
r[key].append(res[key])
geary_descriptors = | pd.DataFrame(r) | pandas.DataFrame |
from typing import Dict, Any, Optional
import os
import json
CONFIG_LOCATION = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, "data", "path_config.json"))
with open(CONFIG_LOCATION) as _json_file:
CONFIG = json.load(_json_file)
os.environ["OMP_NUM_THREADS"] = "8"
import copy
import numpy as np
import pandas as pd
import argparse
import logging
import pickle
logger = logging.getLogger("s2and")
from tqdm import tqdm
os.environ["S2AND_CACHE"] = os.path.join(CONFIG["main_data_dir"], ".feature_cache")
from sklearn.cluster import DBSCAN
from s2and.data import ANDData
from s2and.featurizer import featurize, FeaturizationInfo
from s2and.model import PairwiseModeler, Clusterer, FastCluster
from s2and.eval import pairwise_eval, cluster_eval
from s2and.consts import FEATURIZER_VERSION, DEFAULT_CHUNK_SIZE, NAME_COUNTS_PATH
from s2and.file_cache import cached_path
from hyperopt import hp
search_space = {
"eps": hp.uniform("choice", 0, 1),
}
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
| pd.set_option("display.max_colwidth", None) | pandas.set_option |
import os
from typing import Iterator, List
from struct import unpack as struct_unpack
import pandas as pd
import numpy as np
from network_flow_generator.utils.file_utils import ensure_file
from network_flow_generator.log import Logger
log = Logger.get()
class Preprocessor:
def __init__(self, df: Iterator[pd.DataFrame]):
self._df = df
def save(self, fpath: str, force=False) -> None:
"""Saves the processed dataframe to a csv file.
Args:
fpath (str): The file path
force (bool, optional): Overwrite an existing file. Defaults to False.
Raises:
OSError: If the given path exists and is not a file.
FileExistsError: If file already exists and force is set to false.
"""
ensure_file(fpath, force)
first_chunk = True
for df in self._df:
df = self._process(df)
df.to_csv(fpath, mode="a", compression="infer", header=first_chunk, index=False)
first_chunk = False
def get(self) -> Iterator[pd.DataFrame]:
"""Gets the processed dataframe.
Returns:
Iterator[pd.DataFrame]: Generator of processsed dataframes
"""
for df in self._df:
df = self._process(df)
yield df
def _process(self, df: pd.DataFrame):
"""Processes a single pandas dataframe.
Args:
df (pd.DataFrame): The dataframe to process
"""
raise NotImplementedError()
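# Minimal usage sketch (the chunk source and file names are assumptions; in this
# project the chunks normally come from a loader that has already parsed
# timestamps and IP addresses):
#   chunks = load_cidds_chunks("week1.csv")                 # Iterator[pd.DataFrame]
#   CiddsBinaryPreprocessor(chunks).save("week1_bin.csv", force=True)
#   for df in CiddsNumericPreprocessor(chunks).get(): ...   # or stream instead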
class CiddsBinaryPreprocessor(Preprocessor):
def _unpack_bits(self, number: int, bits: int) -> List[np.uint8]:
"""Converts a decimal number to a binary number.
Args:
number (int): A decimal number to convert
bits (int): Number of bits the resulting binary number shall have.
Returns:
List[np.uint8]: Binary representation of the decimal number as a list of bits.
"""
dtype = {
            8: ">u1",
            16: ">u2",
            32: ">u4",
        }.get(bits, ">u4")  # unsigned so 16-bit ports up to 65535 do not overflow
return np.unpackbits(np.array([number], dtype=dtype).view(np.uint8))
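    # Example of the bit expansion above (illustrative value, HTTPS port 443):
    #   _unpack_bits(443, 16) -> [0,0,0,0,0,0,0,1, 1,0,1,1,1,0,1,1]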
def _process(self, df: pd.DataFrame):
"""Processes a single pandas dataframe in cidds format to be used with tensorflow.
Usefull information: https://www.tensorflow.org/tutorials/structured_data/feature_columns
Args:
df (pd.DataFrame): The dataframe to process
"""
log.debug("Processing dataframe")
# date_first_seen
indicies = [
"isMonday",
"isTuesday",
"isWednesday",
"isThursday",
"isFriday",
"isSaturday",
"isSunday",
]
dayofweek = df["date_first_seen"].dt.dayofweek
weekdays = []
for i, weekday in enumerate(indicies):
weekdays.append((dayofweek == i).map(int).rename(weekday))
daytime = (df["date_first_seen"] -
df["date_first_seen"].astype("datetime64[D]")).apply(lambda v: v.seconds / 86400)
# duration
# normalize over chunk
min_duration = df["duration"].min()
max_duration = df["duration"].max()
norm_duration = ((df["duration"] - min_duration) / (max_duration - min_duration)).rename("norm_dur")
# proto
proto_tcp = (df["proto"] == "TCP").apply(int).rename("isTCP")
proto_udp = (df["proto"] == "UDP").apply(int).rename("isUDP")
proto_icmp = (df["proto"] == "ICMP").apply(int).rename("isICMP")
# src_ip_addr
indicies = ["src_ip_" + str(i) for i in range(32)]
src_ip_addr = df["src_ip_addr"].apply(lambda v: pd.Series(self._unpack_bits(int(v), 32), index=indicies))
# src_pt
indicies = ["src_pt_" + str(i) for i in range(16)]
src_pt = df["src_pt"].apply(lambda v: pd.Series(self._unpack_bits(int(v), 16), index=indicies))
# dst_ip_addr
indicies = ["dst_ip_" + str(i) for i in range(32)]
dst_ip_addr = df["dst_ip_addr"].apply(lambda v: pd.Series(self._unpack_bits(int(v), 32), index=indicies))
# dst_pt
indicies = ["dst_pt_" + str(i) for i in range(16)]
dst_pt = df["dst_pt"].apply(lambda v: pd.Series(self._unpack_bits(int(v), 16), index=indicies))
# packets
indicies = ["pck_" + str(i) for i in range(32)]
packets = df["packets"].apply(lambda v: pd.Series(self._unpack_bits(int(v), 32), index=indicies))
# bytes
indicies = ["byt_" + str(i) for i in range(32)]
_bytes = df["bytes"].apply(lambda v: pd.Series(self._unpack_bits(int(v), 32), index=indicies))
# tcp flags
indicies = ["isURG", "isACK", "isPSH", "isRES", "isSYN", "isFIN"]
flags = df["flags"].apply(lambda v: pd.Series(map(int, v), index=indicies))
# create DataFrame
all_series = weekdays + [
daytime, norm_duration, proto_tcp, proto_udp, proto_icmp, src_ip_addr, src_pt, dst_ip_addr, dst_pt, packets,
_bytes, flags
]
processed_df = pd.concat(all_series, axis=1)
return processed_df
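# Hypothetical usage sketch (the constructor arguments are an assumption; the
# constructor is defined earlier in this module and is not shown here):
#
#   pre = CiddsBinaryPreprocessor(df_chunks)      # df_chunks: Iterator[pd.DataFrame]
#   pre.write_to_csv("cidds_binary.csv.gz", force=True)
#   for encoded in pre.get():                     # or stream chunks directly
#       ...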
class CiddsNumericPreprocessor(Preprocessor):
def _process(self, df: pd.DataFrame):
"""Processes a single pandas dataframe in cidds format to be used with tensorflow.
        Useful information: https://www.tensorflow.org/tutorials/structured_data/feature_columns
Args:
df (pd.DataFrame): The dataframe to process
"""
log.debug("Processing dataframe")
indicies = [
"isMonday",
"isTuesday",
"isWednesday",
"isThursday",
"isFriday",
"isSaturday",
"isSunday",
]
dayofweek = df["date_first_seen"].dt.dayofweek
weekdays = []
for i, weekday in enumerate(indicies):
weekdays.append((dayofweek == i).map(int).rename(weekday))
daytime = (df["date_first_seen"] -
df["date_first_seen"].astype("datetime64[D]")).apply(lambda v: v.seconds / 86400)
# duration
# normalize over chunk
min_duration = df["duration"].min()
max_duration = df["duration"].max()
norm_duration = ((df["duration"] - min_duration) / (max_duration - min_duration)).rename("norm_dur")
# proto
proto_tcp = (df["proto"] == "TCP").apply(int).rename("isTCP")
proto_udp = (df["proto"] == "UDP").apply(int).rename("isUDP")
proto_icmp = (df["proto"] == "ICMP").apply(int).rename("isICMP")
# src_ip_addr
indicies = ["src_ip_" + str(i) for i in range(4)]
src_ip_addr = df["src_ip_addr"].apply(
lambda v: pd.Series([x / 255 for x in struct_unpack('BBBB', v.packed)], index=indicies))
# src_pt
src_pt = df["src_pt"].apply(lambda v: v / 65535).rename("src_pt")
# dst_ip_addr
indicies = ["dst_ip_" + str(i) for i in range(4)]
dst_ip_addr = df["dst_ip_addr"].apply(
lambda v: pd.Series([x / 255 for x in struct_unpack('BBBB', v.packed)], index=indicies))
# dst_pt
dst_pt = df["dst_pt"].apply(lambda v: v / 65535).rename("dst_pt")
# packets
min_packets = df["packets"].min()
max_packets = df["packets"].max()
norm_packets = ((df["packets"] - min_packets) / (max_packets - min_packets)).rename("norm_pck")
# bytes
min_bytes = df["bytes"].min()
max_bytes = df["bytes"].max()
norm_bytes = ((df["bytes"] - min_bytes) / (max_bytes - min_bytes)).rename("norm_byt")
# tcp flags
indicies = ["isURG", "isACK", "isPSH", "isRES", "isSYN", "isFIN"]
flags = df["flags"].apply(lambda v: pd.Series(map(int, v), index=indicies))
# create DataFrame
all_series = weekdays + [
daytime, norm_duration, proto_tcp, proto_udp, proto_icmp, src_ip_addr, src_pt, dst_ip_addr, dst_pt,
norm_packets, norm_bytes, flags
]
        processed_df = pd.concat(all_series, axis=1)
        return processed_df
"""Set operations
https://github.com/tidyverse/dplyr/blob/master/R/sets.r
"""
import pandas
from pandas import DataFrame
from pipda import register_verb
from pipda.utils import CallingEnvs
from ..core.contexts import Context
from ..core.grouped import DataFrameGroupBy
from ..core.utils import reconstruct_tibble
from ..base.verbs import intersect, union, setdiff, setequal
from .bind import bind_rows
def _check_xy(x: DataFrame, y: DataFrame) -> None:
"""Check the dimension and columns of x and y for set operations"""
if x.shape[1] != y.shape[1]:
raise ValueError(
"not compatible:\n"
f"- different number of columns: {x.shape[1]} vs {y.shape[1]}"
)
in_y_not_x = setdiff(
y.columns, x.columns, __calling_env=CallingEnvs.REGULAR
)
in_x_not_y = setdiff(
x.columns, y.columns, __calling_env=CallingEnvs.REGULAR
)
if in_y_not_x or in_x_not_y:
msg = ["not compatible:"]
if in_y_not_x:
msg.append(f"- Cols in `y` but not `x`: {in_y_not_x}.")
if in_x_not_y:
msg.append(f"- Cols in `x` but not `y`: {in_x_not_y}.")
raise ValueError("\n".join(msg))
@intersect.register(DataFrame, context=Context.EVAL)
def _(x: DataFrame, y: DataFrame) -> DataFrame:
"""Intersect of two dataframes
Args:
        x: The first dataframe
        y: The second dataframe
    Returns:
        The dataframe of the intersection of the input dataframes
"""
_check_xy(x, y)
from .distinct import distinct
return distinct(
        pandas.merge(x, y, how="inner"),
        __calling_env=CallingEnvs.REGULAR,
    )
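# Usage sketch (illustrative frames; datar verbs are normally used inside a
# pipe, a direct call is shown here only for brevity):
#
#   >>> df1 = DataFrame({"a": [1, 2, 3]})
#   >>> df2 = DataFrame({"a": [2, 3, 4]})
#   >>> intersect(df1, df2)   # rows present in both -> a: 2, 3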
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# QuantStats: Portfolio analytics for quants
# https://github.com/ranaroussi/quantstats
#
# Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io as _io
import datetime as _dt
import pandas as _pd
import numpy as _np
import yfinance as _yf
from . import stats as _stats
def _mtd(df):
return df[df.index >= _dt.datetime.now(
).strftime('%Y-%m-01')]
def _qtd(df):
date = _dt.datetime.now()
for q in [1, 4, 7, 10]:
if date.month <= q:
return df[df.index >= _dt.datetime(
date.year, q, 1).strftime('%Y-%m-01')]
return df[df.index >= date.strftime('%Y-%m-01')]
def _ytd(df):
return df[df.index >= _dt.datetime.now(
).strftime('%Y-01-01')]
def _pandas_date(df, dates):
if not isinstance(dates, list):
dates = [dates]
return df[df.index.isin(dates)]
def _pandas_current_month(df):
n = _dt.datetime.now()
daterange = _pd.date_range(_dt.date(n.year, n.month, 1), n)
return df[df.index.isin(daterange)]
def multi_shift(df, shift=3):
"""Get last N rows relative to another row in pandas"""
if isinstance(df, _pd.Series):
df = _pd.DataFrame(df)
dfs = [df.shift(i) for i in _np.arange(shift)]
for ix, dfi in enumerate(dfs[1:]):
dfs[ix + 1].columns = [str(col) for col in dfi.columns + str(ix + 1)]
    return _pd.concat(dfs, axis=1, sort=True)
def to_returns(prices, rf=0.):
"""Calculates the simple arithmetic returns of a price series"""
return _prepare_returns(prices, rf)
def to_prices(returns, base=1e5):
"""Converts returns series to price data"""
returns = returns.copy().fillna(0).replace(
[_np.inf, -_np.inf], float('NaN'))
return base + base * _stats.compsum(returns)
def log_returns(returns, rf=0., nperiods=None):
"""Shorthand for to_log_returns"""
return to_log_returns(returns, rf, nperiods)
def to_log_returns(returns, rf=0., nperiods=None):
"""Converts returns series to log returns"""
returns = _prepare_returns(returns, rf, nperiods)
try:
return _np.log(returns+1).replace([_np.inf, -_np.inf], float('NaN'))
except Exception:
return 0.
def exponential_stdev(returns, window=30, is_halflife=False):
"""Returns series representing exponential volatility of returns"""
returns = _prepare_returns(returns)
halflife = window if is_halflife else None
return returns.ewm(com=None, span=window,
halflife=halflife, min_periods=window).std()
def rebase(prices, base=100.):
"""
    Rebase all series to a given initial base.
This makes comparing/plotting different series together easier.
Args:
* prices: Expects a price series/dataframe
* base (number): starting value for all series.
"""
return prices.dropna() / prices.dropna().iloc[0] * base
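# For example (illustrative): rebase(_pd.Series([10., 12., 9.])) returns
# 100.0, 120.0, 90.0, so different series start from the same base and can be
# plotted together.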
def group_returns(returns, groupby, compounded=False):
"""Summarize returns
group_returns(df, df.index.year)
group_returns(df, [df.index.year, df.index.month])
"""
if compounded:
return returns.groupby(groupby).apply(_stats.comp)
return returns.groupby(groupby).sum()
def aggregate_returns(returns, period=None, compounded=True):
"""Aggregates returns based on date periods"""
if period is None or 'day' in period:
return returns
index = returns.index
if 'month' in period:
return group_returns(returns, index.month, compounded=compounded)
if 'quarter' in period:
return group_returns(returns, index.quarter, compounded=compounded)
if period == "A" or any(x in period for x in ['year', 'eoy', 'yoy']):
return group_returns(returns, index.year, compounded=compounded)
if 'week' in period:
return group_returns(returns, index.week, compounded=compounded)
if 'eow' in period or period == "W":
return group_returns(returns, [index.year, index.week],
compounded=compounded)
if 'eom' in period or period == "M":
return group_returns(returns, [index.year, index.month],
compounded=compounded)
if 'eoq' in period or period == "Q":
return group_returns(returns, [index.year, index.quarter],
compounded=compounded)
if not isinstance(period, str):
return group_returns(returns, period, compounded)
return returns
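# Illustrative mapping of `period` values (assuming a daily returns series `r`
# with a DatetimeIndex):
#   aggregate_returns(r, 'month')   # group by calendar month (1-12)
#   aggregate_returns(r, 'eom')     # group by (year, month)
#   aggregate_returns(r, 'eoy')     # group by year
#   aggregate_returns(r, None)      # returned unchanged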
def to_excess_returns(returns, rf, nperiods=None):
"""
Calculates excess returns by subtracting
risk-free returns from total returns
Args:
* returns (Series, DataFrame): Returns
* rf (float, Series, DataFrame): Risk-Free rate(s)
* nperiods (int): Optional. If provided, will convert rf to different
frequency using deannualize
Returns:
* excess_returns (Series, DataFrame): Returns - rf
"""
if isinstance(rf, int):
rf = float(rf)
if not isinstance(rf, float):
rf = rf[rf.index.isin(returns.index)]
if nperiods is not None:
# deannualize
rf = _np.power(1 + rf, 1. / nperiods) - 1.
return returns - rf
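# Worked example of the deannualization step above (numbers are illustrative):
# a 5% annual risk-free rate with nperiods=252 trading days becomes
#   (1 + 0.05) ** (1 / 252) - 1  ~= 1.94e-4 per day,
# which is then subtracted from the daily returns.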
def _prepare_prices(data, base=1.):
"""Converts return data into prices + cleanup"""
data = data.copy()
if isinstance(data, _pd.DataFrame):
for col in data.columns:
if data[col].dropna().min() <= 0 or data[col].dropna().max() < 1:
data[col] = to_prices(data[col], base)
# is it returns?
# elif data.min() < 0 and data.max() < 1:
elif data.min() < 0 or data.max() < 1:
data = to_prices(data, base)
if isinstance(data, (_pd.DataFrame, _pd.Series)):
data = data.fillna(0).replace(
[_np.inf, -_np.inf], float('NaN'))
return data
def _prepare_returns(data, rf=0., nperiods=None):
"""Converts price data into returns + cleanup"""
data = data.copy()
if isinstance(data, _pd.DataFrame):
for col in data.columns:
if data[col].dropna().min() >= 0 and data[col].dropna().max() > 1:
data[col] = data[col].pct_change()
elif data.min() >= 0 and data.max() > 1:
data = data.pct_change()
# cleanup data
data = data.replace([_np.inf, -_np.inf], float('NaN'))
if isinstance(data, (_pd.DataFrame, _pd.Series)):
data = data.fillna(0).replace(
[_np.inf, -_np.inf], float('NaN'))
if rf > 0:
return to_excess_returns(data, rf, nperiods)
return data
def download_returns(ticker, period="max"):
if isinstance(period, _pd.DatetimeIndex):
p = {"start": period[0]}
else:
p = {"period": period}
return _yf.Ticker(ticker).history(**p)['Close'].pct_change()
def _prepare_benchmark(benchmark=None, period="max", rf=0.,
prepare_returns=True):
"""
Fetch benchmark if ticker is provided, and pass through
_prepare_returns()
    period can be a string option (e.g. "max") or an (expected) _pd.DatetimeIndex range
"""
if benchmark is None:
return None
if isinstance(benchmark, str):
benchmark = download_returns(benchmark)
elif isinstance(benchmark, _pd.DataFrame):
benchmark = benchmark[benchmark.columns[0]].copy()
if isinstance(period, _pd.DatetimeIndex) \
and set(period) != set(benchmark.index):
# Adjust Benchmark to Strategy frequency
benchmark_prices = to_prices(benchmark, base=1)
new_index = _pd.date_range(start=period[0], end=period[-1], freq='D')
benchmark = benchmark_prices.reindex(new_index, method='bfill') \
.reindex(period).pct_change().fillna(0)
benchmark = benchmark[benchmark.index.isin(period)]
if prepare_returns:
return _prepare_returns(benchmark.dropna(), rf=rf)
return benchmark.dropna()
def _round_to_closest(val, res, decimals=None):
"""Round to closest resolution"""
if decimals is None and "." in str(res):
decimals = len(str(res).split('.')[1])
return round(round(val / res) * res, decimals)
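# For example: _round_to_closest(3.1416, 0.05) -> 3.15 (the ".05" resolution
# implies 2 decimals), and _round_to_closest(7.3, 0.5) -> 7.5.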
def _file_stream():
"""Returns a file stream"""
return _io.BytesIO()
def _in_notebook(matplotlib_inline=False):
"""Identify enviroment (notebook, terminal, etc)"""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
# Jupyter notebook or qtconsole
if matplotlib_inline:
get_ipython().magic("matplotlib inline")
return True
if shell == 'TerminalInteractiveShell':
# Terminal running IPython
return False
# Other type (?)
return False
except NameError:
# Probably standard Python interpreter
return False
def _count_consecutive(data):
"""Counts consecutive data (like cumsum() with reset on zeroes)"""
def _count(data):
return data * (data.groupby(
(data != data.shift(1)).cumsum()).cumcount() + 1)
if isinstance(data, _pd.DataFrame):
for col in data.columns:
data[col] = _count(data[col])
return data
return _count(data)
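# For example: _count_consecutive(_pd.Series([1, 1, 0, 1, 1, 1])) yields
# 1, 2, 0, 1, 2, 3: the running count restarts whenever the value is 0.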
def _score_str(val):
"""Returns + sign for positive values (used in plots)"""
return ("" if "-" in val else "+") + str(val)
def make_index(ticker_weights, rebalance="1M", period="max", returns=None, match_dates=False):
"""
Makes an index out of the given tickers and weights.
Optionally you can pass a dataframe with the returns.
    If returns is not given, it will try to download them with yfinance.
Args:
* ticker_weights (Dict): A python dict with tickers as keys
and weights as values
* rebalance: Pandas resample interval or None for never
* period: time period of the returns to be downloaded
        * returns (Series, DataFrame): Optional. If provided, it will first
          check whether returns for the given ticker are in this dataframe;
          if not, it will try to download them with yfinance
Returns:
* index_returns (Series, DataFrame): Returns for the index
"""
    # Declare the index (returns) variable
index = None
portfolio = {}
# Iterate over weights
for ticker in ticker_weights.keys():
if (returns is None) or (ticker not in returns.columns):
# Download the returns for this ticker, e.g. GOOG
ticker_returns = download_returns(ticker, period)
else:
ticker_returns = returns[ticker]
portfolio[ticker] = ticker_returns
# index members time-series
index = _pd.DataFrame(portfolio).dropna()
if match_dates:
        index = index[max(index.ne(0).idxmax()):]
# no rebalance?
if rebalance is None:
for ticker, weight in ticker_weights.items():
index[ticker] = weight * index[ticker]
return index.sum(axis=1)
last_day = index.index[-1]
# rebalance marker
rbdf = index.resample(rebalance).first()
rbdf['break'] = rbdf.index.strftime('%s')
# index returns with rebalance markers
index = _pd.concat([index, rbdf['break']], axis=1)
    # mark the first day
    index['first_day'] = _pd.isna(index['break'])
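# Hypothetical usage (tickers and weights are illustrative, not a
# recommendation): an index that holds 60% SPY and 40% TLT without rebalancing
# only exercises the `rebalance is None` branch above.
#
#   idx_returns = make_index({"SPY": 0.6, "TLT": 0.4}, rebalance=None, period="5y")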
# -*- coding: utf-8 -*-
import pandas as pd
import seaborn as sns
sns.set_style('white')
##############################################################################
#################### Creative Achievement Questionnaire ######################
##############################################################################
def run_CAQ(df, out_dir, public):
cols = ["QACa_QACa1","QACa_QACa2","QACa_QACa3","QACa_QACa4","QACa_QACa5","QACa_QACa6","QACa_QACa7",
"QACa_QACa8","QACa_QACa9","QACa_QACa10","QACa_QACa11","QACa_QACa12","QACa_QACa13","QACb_QACb1",
"QACb_QACb2","QACb_QACb3","QACb_QACb4","QACb_QACb5","QACb_QACb6","QACb_QACb7","QACb1","QACc_QACc1",
"QACc_QACc2","QACc_QACc3","QACc_QACc4","QACc_QACc5","QACc_QACc6","QACc_QACc7","QACc_QACc9","QACc1",
"QACd_QACd1","QACd_QACd2","QACd_QACd3","QACd_QACd4","QACd_QACd5","QACd_QACd6","QACd_QACd7",
"QACd_QACd8","QACd1","QACz_QACz1","QACz_QACz2","QACz_QACz3","QACz_QACz4","QACz_QACz5","QACz_QACz6",
"QACz_QACz7","QACz_QACz8","QACz1","QACe_QACe1","QACe_QACe2","QACe_QACe3","QACe_QACe4","QACe_QACe5",
"QACe_QACe6","QACe_QACe7","QACe_QACe8","QACe1","QACf_QACf1","QACf_QACf2","QACf_QACf3","QACf_QACf4",
"QACf_QACf5","QACf_QACf6","QACf_QACf7","QACf_QACf8","QACg_QACg1","QACg_QACg2","QACg_QACg3",
"QACg_QACg4","QACg_QACg5","QACg_QACg6","QACg_QACg7","QACg1","QACh_QACg8","QACh1","QACi_QACi1",
"QACi_QACi2","QACi_QACi3","QACi_QACi4","QACi_QACi5","QACi_QACi6","QACi1","QACj_QACj1","QACj1",
"QACk_QACk1","QACl_QACl1","QACl_QACl2","QACl_QACl3","QACl_QACl4","QACl_QACl5","QACl_QACl6",
"QACl_QACl7","QACl_QACl8","QACl1","QACm_QACm1","QACm_QACm2","QACm_QACm3","QACm_QACm4","QACm_QACm5",
"QACm_QACm6","QACm_QACm7","QACm_QACm8","QACm1"]
cols_export = ['CAQ_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/CAQ.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/CAQ.csv' % out_dir, decimal='.', index=False)
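# Hypothetical driver sketch (the path and the raw export name are assumptions;
# the actual calling script is not part of this file). Every run_* converter
# below follows the same pattern: rename the raw LimeSurvey columns, keep
# subjects with at least one response, optionally restrict to subjects with
# MRI data and anonymize their IDs, then write one CSV per instrument.
#
#   df = pd.read_csv("lsd_survey_export.csv")          # assumed raw export
#   run_CAQ(df, out_dir="/tmp/lsd_behavioural", public=False)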
##############################################################################
#################### Metacognition Questionnaire 30 #########################
##############################################################################
def run_MCQ30(df, out_dir, public):
cols = ['MCQ1_MCQ1', 'MCQ1_MCQ2', 'MCQ1_MCQ3', 'MCQ1_MCQ4', 'MCQ1_MCQ5',
'MCQ1_MCQ6', 'MCQ1_MCQ7', 'MCQ1_MCQ8', 'MCQ1_MCQ9', 'MCQ1_MCQ10',
'MCQ2_MCQ11', 'MCQ2_MCQ12', 'MCQ2_MCQ13', 'MCQ2_MCQ14',
'MCQ2_MCQ15', 'MCQ2_MCQ16', 'MCQ2_MCQ17', 'MCQ2_MCQ18',
'MCQ2_MCQ19', 'MCQ2_MCQ20', 'MCQ3_MCQ21', 'MCQ3_MCQ22',
'MCQ3_MCQ23', 'MCQ3_MCQ24', 'MCQ3_MCQ25', 'MCQ3_MCQ26',
'MCQ3_MCQ27', 'MCQ3_MCQ28', 'MCQ3_MCQ29', 'MCQ3_MCQ30']
cols_export = ['MCQ_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/MCQ30.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/MCQ30.csv' % out_dir, decimal='.', index=False)
##############################################################################
#################### Body Consciousness Questionnaire ########################
##############################################################################
def run_BCQ(df, out_dir, public):
cols = ['BCQ1_BCQ1', 'BCQ1_BCQ2', 'BCQ1_BCQ3', 'BCQ1_BCQ4',
'BCQ1_BCQ5', 'BCQ1_BCQ6', 'BCQ1_BCQ7', 'BCQ1_BCQ8', 'BCQ1_BCQ9',
'BCQ1_BCQ10', 'BCQ2_BCQ11', 'BCQ2_BCQ12', 'BCQ2_BCQ13',
'BCQ2_BCQ14', 'BCQ2_BCQ15']
cols_export = ['BCQ_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/BCQ.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/BCQ.csv' % out_dir, decimal='.', index=False)
##############################################################################
################### Five Facet Mindfulness Questionnaire #####################
##############################################################################
def run_FFMQ(df, out_dir, public):
cols = ['FFMQ1_FFMQ1', 'FFMQ1_FFMQ2',
'FFMQ1_FFMQ3', 'FFMQ1_FFMQ4', 'FFMQ1_FFMQ5', 'FFMQ1_FFMQ6',
'FFMQ1_FFMQ7', 'FFMQ1_FFMQ8', 'FFMQ1_FFMQ9', 'FFMQ1_FFMQ10',
'FFMQ2_MMFQ11', 'FFMQ2_MMFQ12', 'FFMQ2_MMFQ13', 'FFMQ2_MMFQ14',
'FFMQ2_MMFQ15', 'FFMQ2_MMFQ16', 'FFMQ2_MMFQ17', 'FFMQ2_MMFQ18',
'FFMQ2_MMFQ19', 'FFMQ2_MMFQ20', 'FFMQ3_FFMQ21', 'FFMQ3_FFMQ22',
'FFMQ3_FFMQ23', 'FFMQ3_FFMQ24', 'FFMQ3_FFMQ25', 'FFMQ3_FFMQ26',
'FFMQ3_FFMQ27', 'FFMQ3_FFMQ28', 'FFMQ3_FFMQ29', 'FFMQ3_FFMQ30',
'FFMQ4_FFMQ31', 'FFMQ4_FFMQ32', 'FFMQ4_FFMQ33', 'FFMQ4_FFMQ34',
'FFMQ4_FFMQ35', 'FFMQ4_FFMQ36', 'FFMQ4_FFMQ37', 'FFMQ4_FFMQ38',
'FFMQ4_FFMQ39']
cols_export = ['FFMQ_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/FFMQ.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/FFMQ.csv' % out_dir, decimal='.', index=False)
##############################################################################
#################### Abbreviated Math Anxiety Scale ##########################
##############################################################################
def run_AMAS(df, out_dir, public):
cols = ['AMAS[1]',
'AMAS[2]',
'AMAS[3]',
'AMAS[4]',
'AMAS[5]',
'AMAS[6]',
'AMAS[7]',
'AMAS[8]',
'AMAS[9]']
cols_export = ['AMAS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/AMAS.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/AMAS.csv' % out_dir, decimal='.', index=False)
##############################################################################
########################## self control scale ################################
##############################################################################
def run_SelfCtrl(df, out_dir, public):
cols = ['SCSaBASEQ[SCS1]',
'SCSaBASEQ[SCS2r]',
'SCSaBASEQ[SCS3r]',
'SCSaBASEQ[SCS4r]',
'SCSaBASEQ[SCS5r]',
'SCSaBASEQ[SCS6r]',
'SCSbBASEQ[SCS7r]',
'SCSbBASEQ[SCS8r]',
'SCSbBASEQ[SCS9r]',
'SCSbBASEQ[SCS10r]',
'SCSbBASEQ[SCS11r]',
'SCSbBASEQ[SCS12]',
'SCSbBASEQ[SCS13]']
cols_export = ['SCS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/SCS.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/SCS.csv' % out_dir, decimal='.', index=False)
##############################################################################
################ Internet Addiction test #####################################
##############################################################################
def run_IAT(df, out_dir, public):
cols = ['IATaBASEQ[IAT1]',
'IATaBASEQ[IAT2]',
'IATbBASEQ[IAT3]',
'IATcBASEQ[IAT4]',
'IATcBASEQ[IAT5]',
'IATcBASEQ[IAT6]',
'IATcBASEQ[IAT7]',
'IATcBASEQ[IAT8]',
'IATcBASEQ[IAT9]',
'IATcBASEQ[IAT10]',
'IATcBASEQ[IAT11]',
'IATcBASEQ[IAT12]',
'IATcBASEQ[IAT13]',
'IATcBASEQ[IAT14]',
'IATdBASEQ[IAT15]',
'IATdBASEQ[IAT16]',
'IATdBASEQ[IAT17]',
'IATdBASEQ[IAT18]',
'IATdBASEQ[IAT19]',
'IATdBASEQ[IAT20]']
cols_export = ['IAT_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/IAT.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/IAT.csv' % out_dir, decimal='.', index=False)
##############################################################################
################ Varieties of Inner Speech Questionnaire (VISQ) ##############
################ (German version: "Arten innerer Sprache") ###################
##############################################################################
def run_VIS(df, out_dir, public):
cols = ['AISaBASEQ[AIS1]', 'AISaBASEQ[AIS2]', 'AISaBASEQ[AIS3]', 'AISaBASEQ[AIS4]',
'AISaBASEQ[AIS5]', 'AISaBASEQ[AIS6]', 'AISaBASEQ[AIS7]', 'AISaBASEQ[AIS8]',
'AISaBASEQ[AIS9]', 'AISaBASEQ[AIS10]', 'AISbBASEQ[AIS11]', 'AISbBASEQ[AIS12]',
'AISbBASEQ[AIS13]', 'AISbBASEQ[AIS14]', 'AISbBASEQ[AIS15]', 'AISbBASEQ[AIS16]',
'AISbBASEQ[AIS17]', 'AISbBASEQ[AIS18]']
cols_export = ['VIS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/VISQ.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/VISQ.csv' % out_dir, decimal='.', index=False)
##############################################################################
############# Spontaneous and Deliberate Mind Wandering ######################
##############################################################################
def run_MW_SD(df, out_dir, public):
cols = ["MWBASEQ[MWD1]", "MWBASEQ[MWD2]", "MWBASEQ[MWD3]", "MWBASEQ[MWD4]",
"MWBASEQ[MWS1]", "MWBASEQ[MWS2]", "MWBASEQ[MWS3]", "MWBASEQ[MWS4]"]
cols_export = ['S-D-MW_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/S-D-MW.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/S-D-MW.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################# short dark triad ##############################
##############################################################################
def run_SDT(df, out_dir, public):
cols = ['SDTmBASEQ[SDTM1]', 'SDTmBASEQ[SDTM2]', 'SDTmBASEQ[SDTM3]', 'SDTmBASEQ[SDTM4]',
'SDTmBASEQ[SDTM5]', 'SDTmBASEQ[SDTM6]', 'SDTmBASEQ[SDTM7]', 'SDTmBASEQ[SDTM8]',
'SDTmBASEQ[SDTM9]','SDTnBASEQ[SDTN1]', 'SDTnBASEQ[SDTN2r]', 'SDTnBASEQ[SDTN3]',
'SDTnBASEQ[SDTN4]', 'SDTnBASEQ[SDTN5]', 'SDTnBASEQ[SDTN6r]', 'SDTnBASEQ[SDTN7]',
'SDTnBASEQ[SDTN8r]', 'SDTnBASEQ[SDTN9]', 'SDTpBASEQ[SDTP1]', 'SDTpBASEQ[SDTP2r]',
'SDTpBASEQ[SDTP3]', 'SDTpBASEQ[SDTP4]', 'SDTpBASEQ[SDTP5]', 'SDTpBASEQ[SDTP6]',
'SDTpBASEQ[SDTP7r]', 'SDTpBASEQ[SDTP8]', 'SDTpBASEQ[SDTP9]']
cols_export = ['SD3_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/SD3.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/SD3.csv' % out_dir, decimal='.', index=False)
##############################################################################
################################ SDS #########################################
##############################################################################
# social desirability
def run_SDS(df, out_dir, public):
cols = ['SESaBASEQ[SES1r]',
'SESaBASEQ[SES2]',
'SESaBASEQ[SES3]',
'SESaBASEQ[SES4r]',
'SESaBASEQ[SES5]',
'SESaBASEQ[SES6r]',
'SESaBASEQ[SES7r]',
'SESaBASEQ[SES8]',
'SESaBASEQ[SES9]',
'SESaBASEQ[SES10]',
'SESbBASEQ[SES11r]',
'SESbBASEQ[SES12]',
'SESbBASEQ[SES13]',
'SESbBASEQ[SES14]',
'SESbBASEQ[SES15r]',
'SESbBASEQ[SES16]',
'SESbBASEQ[SES17r]']
cols_export = ['SDS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/SDS.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/SDS.csv' % out_dir, decimal='.', index=False)
##############################################################################
##################### UPPSP - impulsivity ####################################
##############################################################################
def run_UPPSP(df, out_dir, public):
cols = ['UPPSaBASEQ[UPP1]', 'UPPSaBASEQ[UPP2r]', 'UPPSaBASEQ[UPP3r]', 'UPPSaBASEQ[UPP4]',
'UPPSaBASEQ[UPP5r]', 'UPPSaBASEQ[UPP6]', 'UPPSaBASEQ[UPP7r]', 'UPPSaBASEQ[UPP8r]',
'UPPSaBASEQ[UPP9r]', 'UPPSaBASEQ[UPP10r]', 'UPPSbBASEQ[UPP11]', 'UPPSbBASEQ[UPP12r]',
'UPPSbBASEQ[UPP13r]', 'UPPSbBASEQ[UPP14]', 'UPPSbBASEQ[UPP15r]', 'UPPSbBASEQ[UPP16]',
'UPPSbBASEQ[UPP17r]', 'UPPSbBASEQ[UPP18r]', 'UPPSbBASEQ[UPP19]', 'UPPSbBASEQ[UPP20r]',
'UPPScBASEQ[UPP21]', 'UPPScBASEQ[UPP22r]', 'UPPScBASEQ[UPP23r]', 'UPPScBASEQ[UPP24]',
'UPPScBASEQ[UPP25r]', 'UPPScBASEQ[UPP26r]', 'UPPScBASEQ[UPP27]', 'UPPScBASEQ[UPP28]',
'UPPScBASEQ[UPP29r]', 'UPPScBASEQ[UPP30r]', 'UPPSdBASEQ[UPP31r]', 'UPPSdBASEQ[UPP32]',
'UPPSdBASEQ[UPP33]', 'UPPSdBASEQ[UPP34r]', 'UPPSdBASEQ[UPP35r]', 'UPPSdBASEQ[UPP36r]',
'UPPSdBASEQ[UPP37]', 'UPPSdBASEQ[UPP38]', 'UPPSdBASEQ[UPP39r]', 'UPPSdBASEQ[UPP40r]',
'UPPSeBASEQ[UPP41r]', 'UPPSeBASEQ[UPP42]', 'UPPSeBASEQ[UPP43]', 'UPPSeBASEQ[UPP44r]',
'UPPSeBASEQ[UPP45r]', 'UPPSeBASEQ[UPP46r]', 'UPPSeBASEQ[UPP47r]', 'UPPSeBASEQ[UPP48]',
'UPPSeBASEQ[UPP49]', 'UPPSeBASEQ[UPP50r]', 'UPPSfBASEQ[UPP51r]', 'UPPSfBASEQ[UPP52r]',
'UPPSfBASEQ[UPP53r]', 'UPPSfBASEQ[UPP54]', 'UPPSfBASEQ[UPP55r]', 'UPPSfBASEQ[UPP56r]',
'UPPSfBASEQ[UPP57r]', 'UPPSfBASEQ[UPP58r]','UPPSfBASEQ[UPP59r]']
cols_export = ['UPPS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/UPPS-P.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/UPPS-P.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################## TPS-D #########################################
################ Tuckman Procrastination Scale (TPS-D) #######################
##############################################################################
def run_TPS(df, out_dir, public):
cols = ['TPSBASEQ[TPS1]',
'TPSBASEQ[TPS2]',
'TPSBASEQ[TPS3]',
'TPSBASEQ[TPS4]',
'TPSBASEQ[TPS5]',
'TPSBASEQ[TPS6]',
'TPSBASEQ[TPS7]',
'TPSBASEQ[TPS8]',
'TPSBASEQ[TPS9]',
'TPSBASEQ[TPS10]',
'TPSBASEQ[TPS11]',
'TPSBASEQ[TPS12]',
'TPSBASEQ[TPS13]',
'TPSBASEQ[TPS14]',
'TPSBASEQ[TPS15]',
'TPSBASEQ[TPS16]']
cols_export = ['TPS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/TPS.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/TPS.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################ ASR 18-59 #######################################
##############################################################################
def run_ASR(df, out_dir, public):
d = {'ASQQ79Freitext': 'ASR_79_comment',
'ASR100Freitext': 'ASR_100_comment',
'ASR92Freitext': 'ASR_92_comment',
'ASRIABASEQ[ASRIA]': 'ASR_I_A',
'ASRIBBASEQ[ASRIB]': 'ASR_I_B',
'ASRICBASEQ[ASRIC]': 'ASR_I_C',
'ASRIDBASEQ[ASRID]': 'ASR_I_D',
'ASRII1': 'ASR_II_1',
'ASRII1[comment]': 'ASR_II_1_comment',
'ASRII2': 'ASR_II_2',
'ASRII3BASEQ[ASRIIA]': 'ASR_II_A',
'ASRII3BASEQ[ASRIIBr]': 'ASR_II_B',
'ASRII3BASEQ[ASRIIC]': 'ASR_II_C',
'ASRII3BASEQ[ASRIID]': 'ASR_II_D',
'ASRII3BASEQ[ASRIIEr]': 'ASR_II_E',
'ASRII3BASEQ[ASRIIFr]': 'ASR_II_F',
'ASRII3BASEQ[ASRIIG]': 'ASR_II_G',
'ASRII3BASEQ[ASRIIHr]': 'ASR_II_H',
'ASRIIIABASEQ[ASRIIIA]': 'ASR_III_A',
'ASRIIIBBASEQ[ASRIIIB]': 'ASR_III_B',
'ASRIIICBASEQ[ASRIIIC]': 'ASR_III_C',
'ASRIIIDBASEQ[ASRIIID]': 'ASR_III_D',
'ASRIIIEaBASEQ[ASRIIIE]': 'ASR_III_E',
'ASRIIIEbBASEQ[ASRIIIE1]': 'ASR_III_E_1',
'ASRIIIEbBASEQ[ASRIIIE2]': 'ASR_III_E_2',
'ASRIIIEbBASEQ[ASRIIIE3]': 'ASR_III_E_3',
'ASRIIIEbBASEQ[ASRIIIE4]': 'ASR_III_E_4',
'ASRIIIFBASEQ[ASRIIIF]': 'ASR_III_F',
'ASRIVaBASEQ[ASRIV]': 'ASR_IV_1_comment',
'ASRIVbBASEQ[ASRIVA]': 'ASR_IV_A',
'ASRIVbBASEQ[ASRIVBr]': 'ASR_IV_B',
'ASRIVbBASEQ[ASRIVC]': 'ASR_IV_C',
'ASRIVbBASEQ[ASRIVDr]': 'ASR_IV_D',
'ASRIVbBASEQ[ASRIVE]': 'ASR_IV_E',
'ASRIVbBASEQ[ASRIVFr]': 'ASR_IV_F',
'ASRIVbBASEQ[ASRIVGr]': 'ASR_IV_G',
'ASRIVbBASEQ[ASRIVHr]': 'ASR_IV_H',
'ASRIVbBASEQ[ASRIVIr]': 'ASR_IV_I',
'ASRQ101BASEQ[ASRQ101]': 'ASR_101',
'ASRQ101BASEQ[ASRQ102]': 'ASR_102',
'ASRQ101BASEQ[ASRQ103]': 'ASR_103',
'ASRQ101BASEQ[ASRQ104]': 'ASR_104',
'ASRQ101BASEQ[ASRQ105]': 'ASR_105',
'ASRQ101BASEQ[ASRQ106]': 'ASR_106',
'ASRQ101BASEQ[ASRQ107]': 'ASR_107',
'ASRQ101BASEQ[ASRQ108]': 'ASR_108',
'ASRQ101BASEQ[ASRQ109]': 'ASR_109',
'ASRQ101BASEQ[ASRQ110]': 'ASR_110',
'ASRQ10BASEQ[ASRQ10]': 'ASR_10',
'ASRQ10BASEQ[ASRQ11]': 'ASR_11',
'ASRQ10BASEQ[ASRQ12]': 'ASR_12',
'ASRQ10BASEQ[ASRQ13]': 'ASR_13',
'ASRQ10BASEQ[ASRQ14]': 'ASR_14',
'ASRQ10BASEQ[ASRQ15]': 'ASR_15',
'ASRQ10BASEQ[ASRQ16]': 'ASR_16',
'ASRQ10BASEQ[ASRQ17]': 'ASR_17',
'ASRQ10BASEQ[ASRQ18]': 'ASR_18',
'ASRQ10BASEQ[ASRQ19]': 'ASR_19',
'ASRQ10BASEQ[ASRQ20]': 'ASR_20',
'ASRQ111BASEQ[ASRQ111]': 'ASR_111',
'ASRQ111BASEQ[ASRQ112]': 'ASR_112',
'ASRQ111BASEQ[ASRQ113]': 'ASR_113',
'ASRQ111BASEQ[ASRQ114]': 'ASR_114',
'ASRQ111BASEQ[ASRQ115]': 'ASR_115',
'ASRQ111BASEQ[ASRQ116]': 'ASR_116',
'ASRQ111BASEQ[ASRQ117]': 'ASR_117',
'ASRQ111BASEQ[ASRQ118]': 'ASR_118',
'ASRQ111BASEQ[ASRQ119]': 'ASR_119',
'ASRQ111BASEQ[ASRQ120]': 'ASR_120',
'ASRQ121BASEQ[ASRQ121]': 'ASR_121',
'ASRQ121BASEQ[ASRQ122]': 'ASR_122',
'ASRQ121BASEQ[ASRQ123]': 'ASR_123',
'ASRQ124': 'ASR_124',
'ASRQ125': 'ASR_125',
'ASRQ126': 'ASR_126',
'ASRQ1BASEQ[ASRQ1]': 'ASR_1',
'ASRQ1BASEQ[ASRQ2]': 'ASR_2',
'ASRQ1BASEQ[ASRQ3]': 'ASR_3',
'ASRQ1BASEQ[ASRQ4]': 'ASR_4',
'ASRQ1BASEQ[ASRQ5]': 'ASR_5',
'ASRQ1BASEQ[ASRQ6]': 'ASR_6',
'ASRQ21BASEQ[ASRQ21]': 'ASR_21',
'ASRQ21BASEQ[ASRQ22]': 'ASR_22',
'ASRQ21BASEQ[ASRQ23]': 'ASR_23',
'ASRQ21BASEQ[ASRQ24]': 'ASR_24',
'ASRQ21BASEQ[ASRQ25]': 'ASR_25',
'ASRQ21BASEQ[ASRQ26]': 'ASR_26',
'ASRQ21BASEQ[ASRQ27]': 'ASR_27',
'ASRQ21BASEQ[ASRQ28]': 'ASR_28',
'ASRQ21BASEQ[ASRQ29]': 'ASR_29',
'ASRQ29Freitext': 'ASR_29_comment',
'ASRQ30BASEQ[ASRQ30]': 'ASR_30',
'ASRQ30BASEQ[ASRQ31]': 'ASR_31',
'ASRQ30BASEQ[ASRQ32]': 'ASR_32',
'ASRQ30BASEQ[ASRQ33]': 'ASR_33',
'ASRQ30BASEQ[ASRQ34]': 'ASR_34',
'ASRQ30BASEQ[ASRQ35]': 'ASR_35',
'ASRQ30BASEQ[ASRQ36]': 'ASR_36',
'ASRQ30BASEQ[ASRQ37]': 'ASR_37',
'ASRQ30BASEQ[ASRQ38]': 'ASR_38',
'ASRQ30BASEQ[ASRQ39]': 'ASR_39',
'ASRQ30BASEQ[ASRQ40]': 'ASR_40',
'ASRQ40Freitext': 'ASR_40_comment',
'ASRQ41BASEQ[ASRQ41]': 'ASR_41',
'ASRQ41BASEQ[ASRQ42]': 'ASR_42',
'ASRQ41BASEQ[ASRQ43]': 'ASR_43',
'ASRQ41BASEQ[ASRQ44]': 'ASR_44',
'ASRQ41BASEQ[ASRQ45]': 'ASR_45',
'ASRQ41BASEQ[ASRQ46]': 'ASR_46',
'ASRQ41BASEQ[ASRQ47]': 'ASR_47',
'ASRQ41BASEQ[ASRQ48]': 'ASR_48',
'ASRQ41BASEQ[ASRQ49]': 'ASR_49',
'ASRQ41BASEQ[ASRQ50]': 'ASR_50',
'ASRQ46Freitext': 'ASR_46_comment',
'ASRQ51BASEQ[ASRQ51]': 'ASR_51',
'ASRQ51BASEQ[ASRQ52]': 'ASR_52',
'ASRQ51BASEQ[ASRQ53]': 'ASR_53',
'ASRQ51BASEQ[ASRQ54]': 'ASR_54',
'ASRQ51BASEQ[ASRQ55]': 'ASR_55',
'ASRQ56BASEQ[ASRVIII561]': 'ASR_56_a',
'ASRQ56BASEQ[ASRVIII562]': 'ASR_56_b',
'ASRQ56BASEQ[ASRVIII563]': 'ASR_56_c',
'ASRQ56BASEQ[ASRVIII564]': 'ASR_56_d',
'ASRQ56BASEQ[ASRVIII565]': 'ASR_56_e',
'ASRQ56BASEQ[ASRVIII566]': 'ASR_56_f',
'ASRQ56BASEQ[ASRVIII567]': 'ASR_56_g',
'ASRQ56Freitext': 'ASR_56_d_comment',
'ASRQ57BASEQ[ASRQ57]': 'ASR_57',
'ASRQ57BASEQ[ASRQ58]': 'ASR_58',
'ASRQ57BASEQ[ASRQ59]': 'ASR_59',
'ASRQ57BASEQ[ASRQ60]': 'ASR_60',
'ASRQ58Freitext': 'ASR_58_comment',
'ASRQ61BASEQ[ASRQ61]': 'ASR_61',
'ASRQ61BASEQ[ASRQ62]': 'ASR_62',
'ASRQ61BASEQ[ASRQ63]': 'ASR_63',
'ASRQ61BASEQ[ASRQ64]': 'ASR_64',
'ASRQ61BASEQ[ASRQ65]': 'ASR_65',
'ASRQ61BASEQ[ASRQ66]': 'ASR_66',
'ASRQ61BASEQ[ASRQ67]': 'ASR_67',
'ASRQ61BASEQ[ASRQ68]': 'ASR_68',
'ASRQ61BASEQ[ASRQ69]': 'ASR_69',
'ASRQ61BASEQ[ASRQ70]': 'ASR_70',
'ASRQ66': 'ASR_66_comment',
'ASRQ6Freitext': 'ASR_6_comment',
'ASRQ70': 'ASR_70_comment',
'ASRQ71BASEQ[ASRQ71]': 'ASR_71',
'ASRQ71BASEQ[ASRQ72]': 'ASR_72',
'ASRQ71BASEQ[ASRQ73]': 'ASR_73',
'ASRQ71BASEQ[ASRQ74]': 'ASR_74',
'ASRQ71BASEQ[ASRQ75]': 'ASR_75',
'ASRQ71BASEQ[ASRQ76]': 'ASR_76',
'ASRQ71BASEQ[ASRQ77]': 'ASR_77',
'ASRQ71BASEQ[ASRQ78]': 'ASR_78',
'ASRQ71BASEQ[ASRQ79]': 'ASR_79',
'ASRQ71BASEQ[ASRQ80]': 'ASR_80',
'ASRQ77Freitext': 'ASR_77_comment',
'ASRQ7BASEQ[ASRQ7]': 'ASR_7',
'ASRQ7BASEQ[ASRQ8]': 'ASR_8',
'ASRQ7BASEQ[ASRQ9]': 'ASR_9',
'ASRQ81BASEQ[ASRQ81]': 'ASR_81',
'ASRQ81BASEQ[ASRQ82]': 'ASR_82',
'ASRQ81BASEQ[ASRQ83]': 'ASR_83',
'ASRQ81BASEQ[ASRQ84]': 'ASR_84',
'ASRQ81BASEQ[ASRQ85]': 'ASR_85',
'ASRQ81BASEQ[ASRQ86]': 'ASR_86',
'ASRQ81BASEQ[ASRQ87]': 'ASR_87',
'ASRQ81BASEQ[ASRQ88]': 'ASR_88',
'ASRQ81BASEQ[ASRQ89]': 'ASR_89',
'ASRQ81BASEQ[ASRQ90]': 'ASR_90',
'ASRQ84Freitext': 'ASR_84_comment',
'ASRQ85Freitext': 'ASR_85_comment',
'ASRQ91BASEQ[ASRQ100]': 'ASR_100',
'ASRQ91BASEQ[ASRQ91]': 'ASR_91',
'ASRQ91BASEQ[ASRQ92]': 'ASR_92',
'ASRQ91BASEQ[ASRQ93]': 'ASR_93',
'ASRQ91BASEQ[ASRQ94]': 'ASR_94',
'ASRQ91BASEQ[ASRQ95]': 'ASR_95',
'ASRQ91BASEQ[ASRQ96]': 'ASR_96',
'ASRQ91BASEQ[ASRQ97]': 'ASR_97',
'ASRQ91BASEQ[ASRQ98]': 'ASR_98',
'ASRQ91BASEQ[ASRQ99]': 'ASR_99',
'ASRQ9Freitext': 'ASR_9_comment',
'ASRVI': 'ASR_VI',
'ASRVII': 'ASR_VII',
'ASRVIII': 'ASR_VIII',
'ASRVII[comment]': 'ASR_VII_comment',
'ASRVI[comment]': 'ASR_VI_comment',
'ASRVa': 'ASR_V_1',
'ASRVa[comment]': 'ASR_V_1_comment',
'ASRVbBASEQ[ASRV1]': 'ASR_V_2',
'ASRVbBASEQ[ASRV1comment]': 'ASR_V_2_comment',
'ASRVbBASEQ[ASRV3]': 'ASR_V_3',
'ASRVbBASEQ[ASRV3comment]': 'ASR_V_3_comment',
'ASRVbBASEQ[ASRV4]': 'ASR_V_4',
'ASRVbBASEQ[ASRV4comment]': 'ASR_V_4_comment',
'ASRVcBASEQ[ASRVA]': 'ASR_V_A',
'ASRVcBASEQ[ASRVB]': 'ASR_V_B',
'ASRVcBASEQ[ASRVCr]': 'ASR_V_C',
'ASRVcBASEQ[ASRVD]': 'ASR_V_D',
'ASRVcBASEQ[ASRVEr]': 'ASR_V_E'}
item_order = ['ASR_I_A',
'ASR_I_B',
'ASR_I_C',
'ASR_I_D',
'ASR_II_1',
'ASR_II_1_comment',
'ASR_II_2',
'ASR_II_A',
'ASR_II_B',
'ASR_II_C',
'ASR_II_D',
'ASR_II_E',
'ASR_II_F',
'ASR_II_G',
'ASR_II_H',
'ASR_III_A',
'ASR_III_B',
'ASR_III_C',
'ASR_III_D',
'ASR_III_E',
'ASR_III_E_1',
'ASR_III_E_2',
'ASR_III_E_3',
'ASR_III_E_4',
'ASR_III_F',
'ASR_IV_1_comment',
'ASR_IV_A',
'ASR_IV_B',
'ASR_IV_C',
'ASR_IV_D',
'ASR_IV_E',
'ASR_IV_F',
'ASR_IV_G',
'ASR_IV_H',
'ASR_IV_I',
'ASR_V_1',
'ASR_V_1_comment',
'ASR_V_2',
'ASR_V_2_comment',
'ASR_V_3',
'ASR_V_3_comment',
'ASR_V_4',
'ASR_V_4_comment',
'ASR_V_A',
'ASR_V_B',
'ASR_V_C',
'ASR_V_D',
'ASR_V_E',
'ASR_VI',
'ASR_VI_comment',
'ASR_VII',
'ASR_VII_comment',
'ASR_VIII',
'ASR_1',
'ASR_2',
'ASR_3',
'ASR_4',
'ASR_5',
'ASR_6',
'ASR_6_comment',
'ASR_7',
'ASR_8',
'ASR_9',
'ASR_9_comment',
'ASR_10',
'ASR_11',
'ASR_12',
'ASR_13',
'ASR_14',
'ASR_15',
'ASR_16',
'ASR_17',
'ASR_18',
'ASR_19',
'ASR_20',
'ASR_21',
'ASR_22',
'ASR_23',
'ASR_24',
'ASR_25',
'ASR_26',
'ASR_27',
'ASR_28',
'ASR_29',
'ASR_29_comment',
'ASR_30',
'ASR_31',
'ASR_32',
'ASR_33',
'ASR_34',
'ASR_35',
'ASR_36',
'ASR_37',
'ASR_38',
'ASR_39',
'ASR_40',
'ASR_40_comment',
'ASR_41',
'ASR_42',
'ASR_43',
'ASR_44',
'ASR_45',
'ASR_46',
'ASR_47',
'ASR_48',
'ASR_49',
'ASR_50',
'ASR_46_comment',
'ASR_51',
'ASR_52',
'ASR_53',
'ASR_54',
'ASR_55',
'ASR_56_a',
'ASR_56_b',
'ASR_56_c',
'ASR_56_d',
'ASR_56_e',
'ASR_56_f',
'ASR_56_g',
'ASR_56_d_comment',
'ASR_57',
'ASR_58',
'ASR_59',
'ASR_60',
'ASR_58_comment',
'ASR_61',
'ASR_62',
'ASR_63',
'ASR_64',
'ASR_65',
'ASR_66',
'ASR_67',
'ASR_68',
'ASR_69',
'ASR_70',
'ASR_66_comment',
'ASR_70_comment',
'ASR_71',
'ASR_72',
'ASR_73',
'ASR_74',
'ASR_75',
'ASR_76',
'ASR_77',
'ASR_78',
'ASR_79',
'ASR_80',
'ASR_77_comment',
'ASR_79_comment',
'ASR_81',
'ASR_82',
'ASR_83',
'ASR_84',
'ASR_85',
'ASR_86',
'ASR_87',
'ASR_88',
'ASR_89',
'ASR_90',
'ASR_84_comment',
'ASR_85_comment',
'ASR_91',
'ASR_92',
'ASR_93',
'ASR_94',
'ASR_95',
'ASR_96',
'ASR_97',
'ASR_98',
'ASR_99',
'ASR_100',
'ASR_92_comment',
'ASR_100_comment',
'ASR_101',
'ASR_102',
'ASR_103',
'ASR_104',
'ASR_105',
'ASR_106',
'ASR_107',
'ASR_108',
'ASR_109',
'ASR_110',
'ASR_111',
'ASR_112',
'ASR_113',
'ASR_114',
'ASR_115',
'ASR_116',
'ASR_117',
'ASR_118',
'ASR_119',
'ASR_120',
'ASR_121',
'ASR_122',
'ASR_123',
'ASR_124',
'ASR_125',
'ASR_126']
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
df.rename(columns=d, inplace=True)
if public:
        # excluding participants' free-text comments
        # ASR_IV_1_comment is coded as a binary variable and can stay
remove = ['ASR_II_1_comment', 'ASR_V_1_comment', 'ASR_V_2_comment', 'ASR_V_3_comment',
'ASR_V_4_comment', 'ASR_VI_comment', 'ASR_VII_comment', 'ASR_VIII', 'ASR_6_comment',
'ASR_9_comment', 'ASR_29_comment', 'ASR_40_comment', 'ASR_46_comment', 'ASR_56_d_comment',
'ASR_58_comment', 'ASR_66_comment', 'ASR_70_comment', 'ASR_77_comment', 'ASR_79_comment',
'ASR_84_comment', 'ASR_85_comment', 'ASR_92_comment', 'ASR_100_comment']
cols_export = [item for item in item_order if item not in remove]
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/ASR.csv' % out_dir, decimal='.', index=False)
else:
        # including participants' free-text comments
cols_export = item_order
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df[['ids'] + cols_export].ix[idx].to_csv('%s/ASR.csv' % out_dir, decimal='.', index=False)
##############################################################################
########################## Self-Esteem Scale #################################
##############################################################################
def run_SE(df, out_dir, public):
cols = ['SEBASEQ[SE1]',
'SEBASEQ[SE2]',
'SEBASEQ[SE3]',
'SEBASEQ[SE4]',
'SEBASEQ[SE5r]',
'SEBASEQ[SE6r]',
'SEBASEQ[SE7r]',
'SEBASEQ[SE8r]']
cols_export = ['SE_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/SE.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/SE.csv' % out_dir, decimal='.', index=False)
##############################################################################
####### Involuntary Musical Imagery Scale (Earworm Scale) ####################
##############################################################################
def run_IMIS(df, out_dir, public):
cols = ['EWSaBASEQ[AQ_1]','EWSbBASEQ[NV1]','EWSbBASEQ[NV2]','EWSbBASEQ[NV3]','EWSbBASEQ[NV4]','EWSbBASEQ[NV5]',
'EWSbBASEQ[NV6]','EWSbBASEQ[NV7]','EWScBASEQ[M1]','EWScBASEQ[M2]','EWScBASEQ[M3]','EWScBASEQ[PR1]',
'EWScBASEQ[PR2]','EWScBASEQ[PR3]','EWScBASEQ[H1]','EWScBASEQ[H2]','EWSdBASEQ[AQ2]','EWSeBASEQ[AQ3]']
cols_export = ['IMIS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/IMIS.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/IMIS.csv' % out_dir, decimal='.', index=False)
##############################################################################
####### Goldsmiths Musical Sophistication Index (Gold-MSI) ###################
##############################################################################
def run_GoldMSI(df, out_dir, public):
cols = ['MUSaBASEQ[MUS_1]','MUSaBASEQ[MUS_3]','MUSaBASEQ[MUS_8]','MUSaBASEQ[MUS_15]','MUSaBASEQ[MUS_21]','MUSaBASEQ[MUS_24]',
'MUSaBASEQ[MUS_28]','MUSbBASEQ[MUS_34]','MUScBASEQ[MUS_38]','MUSdBASEQ[MUS_14]','MUSdBASEQ[MUS_27]','MUSeBASEQ[MUS_32]',
'MUSfBASEQ[MUS_33]','MUSgBASEQ[MUS_35]','MUShBASEQ[MUS_36]','MUSiBASEQ[MUS_37]']
cols_export = ['GoldMSI_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/Gold-MSI.csv' % out_dir, decimal='.', index=False)
else:
df[['ids'] + cols_export].ix[idx].to_csv('%s/Gold-MSI.csv' % out_dir, decimal='.', index=False)
##############################################################################
######################## Epworth Sleepiness Scale ############################
##############################################################################
def run_ESS(df, out_dir, public):
cols = ['ESSBASEQ[ESS1]', 'ESSBASEQ[ESS2]', 'ESSBASEQ[ESS3]', 'ESSBASEQ[ESS4]',
'ESSBASEQ[ESS5]', 'ESSBASEQ[ESS6]', 'ESSBASEQ[ESS7]', 'ESSBASEQ[ESS8]']
cols_export = ['ESS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
df.set_index([range(len(df.index))], inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
df.set_index([range(len(df.index))], inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/ESS.csv' % out_dir, decimal='.', index=False)
else:
        df[['ids'] + cols_export].loc[idx].to_csv('%s/ESS.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################## BDI ###########################################
##############################################################################
def run_BDI(df, out_dir, public):
cols_raw = ['BDIABASEQ[BDIA0]', 'BDIABASEQ[BDIA1]', 'BDIABASEQ[BDIA2]', 'BDIABASEQ[BDIA3]',
'BDIBBASEQ[BDIB0]', 'BDIBBASEQ[BDIB1]', 'BDIBBASEQ[BDIB2]',
'BDIBBASEQ[BDIB3]', 'BDICBASEQ[BDIC0]', 'BDICBASEQ[BDIC1]',
'BDICBASEQ[BDIC2]', 'BDICBASEQ[BDIC3]', 'BDIDBASEQ[BDID0]',
'BDIDBASEQ[BDID1]', 'BDIDBASEQ[BDID2]', 'BDIDBASEQ[BDID3]',
'BDIEBASEQ[BDIE0]', 'BDIEBASEQ[BDIE1]', 'BDIEBASEQ[BDIE2]',
'BDIEBASEQ[BDIE3]', 'BDIFBASEQ[BDIF0]', 'BDIFBASEQ[BDIF1]',
'BDIFBASEQ[BDIF2]', 'BDIFBASEQ[BDIF3]', 'BDIGBASEQ[BDIG0]',
'BDIGBASEQ[BDIG1]', 'BDIGBASEQ[BDIG2]', 'BDIGBASEQ[BDIG3]',
'BDIHBASEQ[BDIH0]', 'BDIHBASEQ[BDIH1]', 'BDIHBASEQ[BDIH2]',
'BDIHBASEQ[BDIH3]', 'BDIIBASEQ[BDII0]', 'BDIIBASEQ[BDII1]',
'BDIIBASEQ[BDII2]', 'BDIIBASEQ[BDII3]', 'BDIJBASEQ[BDIJ0]',
'BDIJBASEQ[BDIJ1]', 'BDIJBASEQ[BDIJ2]', 'BDIJBASEQ[BDIJ3]',
'BDIKBASEQ[BDIK0]', 'BDIKBASEQ[BDIK1]', 'BDIKBASEQ[BDIK2]',
'BDIKBASEQ[BDIK3]', 'BDILBASEQ[BDIL0]', 'BDILBASEQ[BDIL1]',
'BDILBASEQ[BDIL2]', 'BDILBASEQ[BDIL3]', 'BDIMBASEQ[BDIM0]',
'BDIMBASEQ[BDIM1]', 'BDIMBASEQ[BDIM2]', 'BDIMBASEQ[BDIM3]',
'BDINBASEQ[BDIN0]', 'BDINBASEQ[BDIN1]', 'BDINBASEQ[BDIN2]',
'BDINBASEQ[BDIN3]', 'BDIOBASEQ[BDIO0]', 'BDIOBASEQ[BDIO1]',
'BDIOBASEQ[BDIO2]', 'BDIOBASEQ[BDIO3]', 'BDIPBASEQ[BDIP0]',
'BDIPBASEQ[BDIP1]', 'BDIPBASEQ[BDIP2]', 'BDIPBASEQ[BDIP3]',
'BDIQBASEQ[BDIQ0]', 'BDIQBASEQ[BDIQ1]', 'BDIQBASEQ[BDIQ2]',
'BDIQBASEQ[BDIQ3]', 'BDIRBASEQ[BDIR0]', 'BDIRBASEQ[BDIR1]',
'BDIRBASEQ[BDIR2]', 'BDIRBASEQ[BDIR3]', 'BDISBASEQ[BDIS0]',
'BDISBASEQ[BDIS1]', 'BDISBASEQ[BDIS2]', 'BDISBASEQ[BDIS3]', 'BDIS4',
'BDITBASEQ[BDIT0]', 'BDITBASEQ[BDIT1]', 'BDITBASEQ[BDIT2]',
'BDITBASEQ[BDIT3]', 'BDIUBASEQ[BDIU0]', 'BDIUBASEQ[BDIU1]',
'BDIUBASEQ[BDIU2]', 'BDIUBASEQ[BDIU3]']
cols_export = ['BDI_%s' % (x+1) for x in range(len(cols_raw))]
df.rename(columns=dict(zip(cols_raw, cols_export)), inplace=True)
# subjects with at least one data entry
    df.reset_index(drop=True, inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
        df.reset_index(drop=True, inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/BDI.csv' % out_dir, decimal='.', index=False)
else:
        df[['ids'] + cols_export].loc[idx].to_csv('%s/BDI.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################## HADS ##########################################
##############################################################################
def run_HADS(df, out_dir, public):
# anxiety / HADS-A
df['tense'] = df['HADS1BASEQ[HADS1]'].subtract(1).multiply(-1).add(3)
df['frightened'] = df['HADS3BASEQ[HADS3]'].subtract(1).multiply(-1).add(3)
df['worry'] = df['HADS5BASEQ[HADS5]'].subtract(1).multiply(-1).add(3)
df['relaxed'] = df['HADS7BASEQ[HADS7]'].subtract(1)
df['butterflies'] = df['HADS9BASEQ[HADS9]'].subtract(1)
df['restless'] = df['HADS11BASEQ[HADS11]'].subtract(1).multiply(-1).add(3)
df['panic'] = df['HADS13BASEQ[HADS13]'].subtract(1).multiply(-1).add(3)
# depression / HADS-D
df['enjoy'] = df['HADS2BASEQ[HADS2]'].subtract(1)
df['laugh'] = df['HADS4BASEQ[HADS4]'].subtract(1)
df['cheerful'] = df['HADS6BASEQ[HADS6]'].subtract(1).multiply(-1).add(3)
df['slowed'] = df['HADS8BASEQ[HADS8]'].subtract(1).multiply(-1).add(3)
df['appearance'] = df['HADS10BASEQ[HADS10]'].subtract(1).multiply(-1).add(3)
df['lookforward'] = df['HADS12BASEQ[HADS12]'].subtract(1)
df['entertain'] = df['HADS14BASEQ[HADS14]'].subtract(1)
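    # note: .subtract(1).multiply(-1).add(3) reverse-scores an item, mapping raw
    # responses 1-4 onto 3-0, while plain .subtract(1) maps 1-4 onto 0-3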
cols = ['tense','enjoy','frightened','laugh','worry','cheerful','relaxed','slowed',
'butterflies','appearance','restless','lookforward','panic','entertain']
cols_export = ['HADS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
    df.reset_index(drop=True, inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
        df.reset_index(drop=True, inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/HADS.csv' % out_dir, decimal='.', index=False)
else:
        df[['ids'] + cols_export].loc[idx].to_csv('%s/HADS.csv' % out_dir, decimal='.', index=False)
##############################################################################
##################### Boredom Proness Scale ##################################
##############################################################################
def run_BPS(df, out_dir, public):
cols = ['BPSaBASEQ[BPS1]','BPSaBASEQ[BPS2]','BPSaBASEQ[BPS3]',
'BPSaBASEQ[BPS4]','BPSaBASEQ[BPS5]','BPSaBASEQ[BPS6]',
'BPSaBASEQ[BPS7]','BPSaBASEQ[BPS8]','BPSaBASEQ[BPS9]',
'BPSaBASEQ[BPS10]','BPSbBASEQ[BPS11]','BPSbBASEQ[BPS12]',
'BPSbBASEQ[BPS13]','BPSbBASEQ[BPS14]','BPSbBASEQ[BPS15]',
'BPSbBASEQ[BPS16]','BPSbBASEQ[BPS17]','BPSbBASEQ[BPS18]',
'BPSbBASEQ[BPS19]','BPSbBASEQ[BPS20]','BPSbBASEQ[BPS21]',
'BPScBASEQ[BPS22]','BPScBASEQ[BPS23]','BPScBASEQ[BPS24]',
'BPScBASEQ[BPS25]','BPScBASEQ[BPS26]','BPScBASEQ[BPS27]',
'BPScBASEQ[BPS28]']
cols_export = ['BPS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
    df.reset_index(drop=True, inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
        df.reset_index(drop=True, inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/BP.csv' % out_dir, decimal='.', index=False)
else:
        df[['ids'] + cols_export].loc[idx].to_csv('%s/BP.csv' % out_dir, decimal='.', index=False)
##############################################################################
################# Derryberry Attention Control Scale #########################
##############################################################################
def run_ACS(df, out_dir, public):
cols = ['DACaBASEQ[DAC1]','DACaBASEQ[DAC2]','DACaBASEQ[DAC3]',
'DACaBASEQ[DAC4]','DACaBASEQ[DAC5]','DACaBASEQ[DAC6]',
'DACaBASEQ[DAC7]','DACbBASEQ[DAC8]','DACbBASEQ[DAC9]',
'DACbBASEQ[DAC10]','DACbBASEQ[DAC11]','DACbBASEQ[DAC12]',
'DACbBASEQ[DAC13]','DACbBASEQ[DAC14]','DACbBASEQ[DAC15]',
'DACcBASEQ[DAC16]','DACcBASEQ[DAC17]','DACcBASEQ[DAC18]',
'DACcBASEQ[DAC19]','DACcBASEQ[DAC20]']
cols_export = ['ACS_%s' % (x+1) for x in range(len(cols))]
df.rename(columns=dict(zip(cols, cols_export)), inplace=True)
# subjects with at least one data entry
    df.reset_index(drop=True, inplace=True)
idx = df[cols_export].dropna(how='all').index
df['ids'] = df['ID'].map(lambda x: str(x)[0:5])
if public:
# subjects with MRI data
subjects_mri = pd.read_csv('/nobackup/adenauer2/LSD/Originals/Documentation/subjects_mri',
header=None, dtype=str)[0]
idx_mri = df.index[df.ids.isin(subjects_mri)]
# subjects with both data for questionnaire and MRI
idx = list(set(idx).intersection(idx_mri))
df = df.iloc[idx]
        df.reset_index(drop=True, inplace=True)
# anonymize IDs
converter = pd.read_excel('/nobackup/adenauer2/LSD/Originals/Documentation/lookup_table.xlsx',
converters={'ids_probanden_db' : str, 'ids_xnat_publicp' : str})
converter_dict = dict(zip(converter['ids_probanden_db'], converter['ids_xnat_publicp']))
df.replace({'ids': converter_dict}, inplace=True)
df[['ids'] + cols_export].to_csv('%s/ACS.csv' % out_dir, decimal='.', index=False)
else:
        df[['ids'] + cols_export].loc[idx].to_csv('%s/ACS.csv' % out_dir, decimal='.', index=False)
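# Illustrative driver for the run_* exporters above (not part of the original script;
# the survey export file name, output folder and public flag are assumptions):
#
#   raw = pd.read_csv('lsd_survey_export.csv', dtype={'ID': str})
#   for exporter in (run_ESS, run_BDI, run_HADS, run_BPS, run_ACS):
#       exporter(raw.copy(), out_dir='behavioral', public=False)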
##############################################################################
############################## NEO-PI-R ######################################
##############################################################################
def run_NEOPIR(pir_f, ffi_lsd_f, out_dir, public):
##### create combined dataframe from FFI (lemon, lsd) and PIR items ####
cols_neo_pir = ['ID', 'NEOaBASEQ[NEO2]','NEOaBASEQ[NEO3]','NEOaBASEQ[NEO5]','NEOaBASEQ[NEO7r]','NEOaBASEQ[NEO8r]','NEOaBASEQ[NEO9]','NEOaBASEQ[NEO10r]','NEOaBASEQ[NEO12]',
'NEOaBASEQ[NEO13]','NEOaBASEQ[NEO16]','NEObBASEQ[NEO17r]','NEObBASEQ[NEO18r]','NEObBASEQ[NEO20r]','NEObBASEQ[NEO21r]','NEObBASEQ[NEO22]','NEObBASEQ[NEO24r]',
'NEObBASEQ[NEO27r]','NEObBASEQ[NEO29]','NEObBASEQ[NEO30r]','NEObBASEQ[NEO31]','NEOcBASEQ[NEO32r]','NEOcBASEQ[NEO33r]','NEOcBASEQ[NEO34]','NEOcBASEQ[NEO35r]',
'NEOcBASEQ[NEO36r]','NEOcBASEQ[NEO38]','NEOcBASEQ[NEO42r]','NEOcBASEQ[NEO43r]','NEOcBASEQ[NEO47]','NEOcBASEQ[NEO48]','NEOdBASEQ[NEO49r]','NEOdBASEQ[NEO51]',
'NEOdBASEQ[NEO52r]','NEOdBASEQ[NEO54]','NEOdBASEQ[NEO56r]','NEOdBASEQ[NEO57]','NEOdBASEQ[NEO58]','NEOdBASEQ[NEO60]','NEOdBASEQ[NEO62]','NEOdBASEQ[NEO63]',
'NEOeBASEQ[NEO65]','NEOeBASEQ[NEO66]','NEOeBASEQ[NEO68r]','NEOeBASEQ[NEO69]','NEOeBASEQ[NEO70r]','NEOeBASEQ[NEO71r]','NEOeBASEQ[NEO72]','NEOeBASEQ[NEO73]',
'NEOeBASEQ[NEO75]','NEOeBASEQ[NEO77r]','NEOfBASEQ[NEO78r]','NEOfBASEQ[NEO79]','NEOfBASEQ[NEO80r]','NEOfBASEQ[NEO81r]','NEOfBASEQ[NEO82]','NEOfBASEQ[NEO84r]',
'NEOfBASEQ[NEO89]','NEOfBASEQ[NEO90r]','NEOfBASEQ[NEO92r]','NEOfBASEQ[NEO94]','NEOgBASEQ[NEO95r]','NEOgBASEQ[NEO96r]','NEOgBASEQ[NEO97]','NEOgBASEQ[NEO99r]',
'NEOgBASEQ[NEO100]','NEOgBASEQ[NEO101]','NEOgBASEQ[NEO102r]','NEOgBASEQ[NEO103r]','NEOgBASEQ[NEO105r]','NEOgBASEQ[NEO106r]','NEOhBASEQ[NEO111]','NEOhBASEQ[NEO112r]',
'NEOhBASEQ[NEO113r]','NEOhBASEQ[NEO114]','NEOhBASEQ[NEO115r]','NEOhBASEQ[NEO116r]','NEOhBASEQ[NEO117]','NEOhBASEQ[NEO118]','NEOhBASEQ[NEO119r]','NEOhBASEQ[NEO120]',
'NEOiBASEQ[NEO121r]','NEOiBASEQ[NEO123]','NEOiBASEQ[NEO124r]','NEOiBASEQ[NEO125]','NEOiBASEQ[NEO126]','NEOiBASEQ[NEO127r]','NEOiBASEQ[NEO129]','NEOiBASEQ[NEO131]',
'NEOiBASEQ[NEO132]','NEOiBASEQ[NEO133]','NEOjBASEQ[NEO134r]','NEOjBASEQ[NEO137r]','NEOjBASEQ[NEO138r]','NEOjBASEQ[NEO139]','NEOjBASEQ[NEO140r]','NEOjBASEQ[NEO141r]',
'NEOjBASEQ[NEO143]','NEOjBASEQ[NEO144r]','NEOjBASEQ[NEO145]','NEOjBASEQ[NEO146]','NEOkBASEQ[NEO148r]','NEOkBASEQ[NEO149]','NEOkBASEQ[NEO150r]','NEOkBASEQ[NEO151]',
'NEOkBASEQ[NEO152]','NEOkBASEQ[NEO153r]','NEOkBASEQ[NEO154]','NEOkBASEQ[NEO155r]','NEOkBASEQ[NEO156r]','NEOkBASEQ[NEO157]','NEOlBASEQ[NEO158]','NEOlBASEQ[NEO159r]',
'NEOlBASEQ[NEO160]','NEOlBASEQ[NEO161]','NEOlBASEQ[NEO165]','NEOlBASEQ[NEO166r]','NEOlBASEQ[NEO167]','NEOlBASEQ[NEO168]','NEOlBASEQ[NEO169r]','NEOlBASEQ[NEO170]',
'NEOmBASEQ[NEO171]','NEOmBASEQ[NEO172]','NEOmBASEQ[NEO174]','NEOmBASEQ[NEO175r]','NEOmBASEQ[NEO176r]','NEOmBASEQ[NEO178]','NEOmBASEQ[NEO179]','NEOmBASEQ[NEO180]',
'NEOmBASEQ[NEO181r]','NEOmBASEQ[NEO182]','NEOnBASEQ[NEO183r]','NEOnBASEQ[NEO184]','NEOnBASEQ[NEO185]','NEOnBASEQ[NEO186]','NEOnBASEQ[NEO187r]','NEOnBASEQ[NEO189r]',
'NEOnBASEQ[NEO190r]','NEOnBASEQ[NEO191]','NEOnBASEQ[NEO192]','NEOnBASEQ[NEO193]','NEOoBASEQ[NEO194]','NEOoBASEQ[NEO195]','NEOoBASEQ[NEO196]','NEOoBASEQ[NEO198r]',
'NEOoBASEQ[NEO199r]','NEOoBASEQ[NEO201]','NEOoBASEQ[NEO202]','NEOoBASEQ[NEO204]','NEOoBASEQ[NEO205r]','NEOoBASEQ[NEO206r]','NEOpBASEQ[NEO207r]','NEOpBASEQ[NEO208r]',
'NEOpBASEQ[NEO209]','NEOpBASEQ[NEO210]','NEOpBASEQ[NEO211]','NEOpBASEQ[NEO212]','NEOpBASEQ[NEO213r]','NEOpBASEQ[NEO214]','NEOpBASEQ[NEO215]','NEOpBASEQ[NEO216]',
'NEOqBASEQ[NEO217]','NEOqBASEQ[NEO218]','NEOqBASEQ[NEO219r]','NEOqBASEQ[NEO220r]','NEOqBASEQ[NEO222r]','NEOqBASEQ[NEO223]','NEOqBASEQ[NEO224]','NEOqBASEQ[NEO225]',
'NEOqBASEQ[NEO226]','NEOqBASEQ[NEO228r]','NEOrBASEQ[NEO230]','NEOrBASEQ[NEO231r]','NEOrBASEQ[NEO232]','NEOrBASEQ[NEO233]','NEOrBASEQ[NEO234r]','NEOrBASEQ[NEO235]',
'NEOrBASEQ[NEO236r]','NEOrBASEQ[NEO238r]','NEOrBASEQ[NEO239]','NEOrBASEQ[NEO240]','NEOrBASEQ[NEO241]']
cols_NEOFFI = ['ID', 'NEOFFI01[NEOFFI01]','NEOFFI01[NEOFFI02]','NEOFFI01[NEOFFI03]','NEOFFI01[NEOFFI04]','NEOFFI01[NEOFFI05]','NEOFFI01[NEOFFI06]','NEOFFI01[NEOFFI07]',
'NEOFFI01[NEOFFI08]','NEOFFI01[NEOFFI09]','NEOFFI01[NEOFFI10]','NEOFFI01[NEOFFI11]','NEOFFI01[NEOFFI12]','NEOFFI13[NEOFFI13]','NEOFFI13[NEOFFI14]',
'NEOFFI13[NEOFFI15]','NEOFFI13[NEOFFI16]','NEOFFI13[NEOFFI17]','NEOFFI13[NEOFFI18]','NEOFFI13[NEOFFI19]','NEOFFI13[NEOFFI20]','NEOFFI13[NEOFFI21]',
'NEOFFI13[NEOFFI22]','NEOFFI13[NEOFFI23]','NEOFFI13[NEOFFI24]','NEOFFI25[NEOFFI25]','NEOFFI25[NEOFFI26]','NEOFFI25[NEOFFI27]','NEOFFI25[NEOFFI28]',
'NEOFFI25[NEOFFI29]','NEOFFI25[NEOFFI30]','NEOFFI25[NEOFFI31]','NEOFFI25[NEOFFI32]','NEOFFI25[NEOFFI33]','NEOFFI25[NEOFFI34]','NEOFFI25[NEOFFI35]',
'NEOFFI25[NEOFFI36]','NEOFFI37[NEOFFI37]','NEOFFI37[NEOFFI38]','NEOFFI37[NEOFFI39]','NEOFFI37[NEOFFI40]','NEOFFI37[NEOFFI41]','NEOFFI37[NEOFFI42]',
'NEOFFI37[NEOFFI43]','NEOFFI37[NEOFFI44]','NEOFFI37[NEOFFI45]','NEOFFI37[NEOFFI46]','NEOFFI37[NEOFFI47]','NEOFFI37[NEOFFI48]','NEOFFI49[NEOFFI49]',
'NEOFFI49[NEOFFI50]','NEOFFI49[NEOFFI51]','NEOFFI49[NEOFFI52]','NEOFFI49[NEOFFI53]','NEOFFI49[NEOFFI54]','NEOFFI49[NEOFFI55]','NEOFFI49[NEOFFI56]',
'NEOFFI49[NEOFFI57]','NEOFFI49[NEOFFI58]','NEOFFI49[NEOFFI59]','NEOFFI49[NEOFFI60]']
##### Neo PI R lemon & lsd ####
df_pir = pd.read_csv(pir_f, sep = ",")[cols_neo_pir]
# prep df
df_pir['ID'].replace('LSD2', '25729', inplace = True)
df_pir['ID'] = df_pir['ID'].map(lambda x: str(x)[0:5])
# drop subject that was tested twice
#idx_drop = df_pir[df_pir.ID == '26642'].index[0]
#df_pir.drop(idx_drop, axis=0, inplace=True)
#df_pir.set_index([range(len(df_pir.index))], inplace=True)
# recode item names from complicated to item numbers
new_items = []
for item in df_pir.columns.values[1:]:
item = item[13:]
item = item[:-1]
if item[-1] == 'r':
item = item[:-1]
new_items.append(item)
dictionary1 = dict(zip(df_pir.columns.values[1:], new_items))
df_pir.rename(columns=dictionary1, inplace=True)
df_pir.dropna(inplace=True)
##### NEO FFI lsd #####
df_ffi_lsd = pd.read_csv(ffi_lsd_f, sep = ",", converters={'ID':str})[cols_NEOFFI]
# drop subject that was tested twice
idx_drop = df_ffi_lsd[df_ffi_lsd.ID == '26642'].index[0]
df_ffi_lsd.drop(idx_drop, axis=0, inplace=True)
    df_ffi_lsd.reset_index(drop=True, inplace=True)
# recode item names from complicated to 1-60
new_items = []
for item in df_ffi_lsd.columns.values[1:]:
item = item[15:]
item = item[:-1]
item = int(item)
item = str(item)
new_items.append(item)
dictionary2 = dict(zip(df_ffi_lsd.columns.values[1:], new_items))
df_ffi_lsd.rename(columns=dictionary2, inplace=True)
# recode ffi item numbers into pir item numbers
ffi2pir = | pd.read_excel('/nobackup/adenauer2/LSD/Originals/Raw/Questionnaires/NEO/NEO KEY.xlsx', converters={0:str, 1:str}) | pandas.read_excel |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import random
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
from tqdm import tqdm
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from nltk import word_tokenize
stopwords = stopwords.words('russian')
def genFeatures(x):
x["activation_weekday"] = x["activation_date"].dt.weekday
x["monthday"] = x["activation_date"].dt.day
x["weekinmonday"] = x["monthday"] // 7
##################Added in set 1 - 0.01 Improvement
    x['price_new'] = np.log1p(x.price) # log transform improves correlation with deal_price
x['count_null_in_row'] = x.isnull().sum(axis=1)# works
x['has_description'] = x.description.isnull().astype(int)
x['has_image'] = x.image.isnull().astype(int)
x['has_image_top'] = x.image_top_1.isnull().astype(int)
x['has_param1'] = x.param_1.isnull().astype(int)
x['has_param2'] = x.param_2.isnull().astype(int)
x['has_param3'] = x.param_3.isnull().astype(int)
x['has_price'] = x.price.isnull().astype(int)
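    # note: despite the "has_" prefix, the flags above are 1 when the field is
    # missing (isnull) and 0 when it is present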
#################Added in set 2 - 0.00x Improvement
x["description"].fillna("NA", inplace=True)
x["desc_nwords"] = x["description"].apply(lambda x: len(x.split()))
x['len_description'] = x['description'].apply(lambda x: len(x))
x["title_nwords"] = x["title"].apply(lambda x: len(x.split()))
x['len_title'] = x['title'].apply(lambda x: len(x))
x['params'] = x['param_1'].fillna('') + ' ' + x['param_2'].fillna('') + ' ' + x['param_3'].fillna('')
x['params'] = x['params'].str.strip()
x['len_params'] = x['params'].apply(lambda x: len(x))
x['words_params'] = x['params'].apply(lambda x: len(x.split()))
x['symbol1_count'] = x['description'].str.count('↓')
x['symbol2_count'] = x['description'].str.count('\*')
x['symbol3_count'] = x['description'].str.count('✔')
x['symbol4_count'] = x['description'].str.count('❀')
x['symbol5_count'] = x['description'].str.count('➚')
x['symbol6_count'] = x['description'].str.count('ஜ')
    x['symbol7_count'] = x['description'].str.count('\.')
x['symbol8_count'] = x['description'].str.count('!')
x['symbol9_count'] = x['description'].str.count('\?')
x['symbol10_count'] = x['description'].str.count(' ')
x['symbol11_count'] = x['description'].str.count('-')
x['symbol12_count'] = x['description'].str.count(',')
####################
return x
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
test_df['deal_probability']=10.0
############################
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
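# illustrative example (not in the original kernel):
#   clean_text('Продам iPhone 6,  32GB!')  ->  'продам iphone 6 32gb'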
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
train_test = pd.concat((train_df, test_df), axis=0)
## After cleaning => then find intersection
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
train_test["params_clean"]= list(train_test[["params"]].apply(lambda x: preprocess_data(x["params"]), axis=1))
train_test['count_common_words_title_desc'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
train_test['count_common_words_title_params'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['params_clean']).lower().split()))), axis=1)
train_test['count_common_words_params_desc'] = train_test.apply(lambda x: len(set(str(x['params_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
print("Cleaned texts..")
###################
# Count Nouns
import pymorphy2
morph = pymorphy2.MorphAnalyzer(result_type=None)
from fastcache import clru_cache as lru_cache
@lru_cache(maxsize=1000000)
def lemmatize_pos(word):
_, tag, norm_form, _, _ = morph.parse(word)[0]
return norm_form, tag.POS
def getPOS(x, pos1 = 'NOUN'):
lemmatized = []
x = clean_text(x)
#x = re.sub(u'[.]', ' ', x)
for s in x.split():
s, pos = lemmatize_pos(s)
if pos != None:
if pos1 in pos:
lemmatized.append(s)
return ' '.join(lemmatized)
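# illustrative: getPOS('Продам новый дом', 'NOUN') keeps only lemmatized nouns,
# e.g. 'дом'; the exact output depends on the installed pymorphy2 dictionaries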
train_test['get_nouns_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'NOUN'), axis=1))
train_test['get_nouns_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'NOUN'), axis=1))
train_test['get_adj_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'ADJ'), axis=1))
train_test['get_adj_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'ADJ'), axis=1))
train_test['get_verb_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'VERB'), axis=1))
train_test['get_verb_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'VERB'), axis=1))
# Count digits
def count_digit(x):
x = clean_text(x)
return len(re.findall(r'\b\d+\b', x))
train_test['count_of_digit_in_title'] = list(train_test.apply(lambda x: count_digit(x['title']), axis=1))
train_test['count_of_digit_in_desc'] = list(train_test.apply(lambda x: count_digit(x['description']), axis=1))
train_test['count_of_digit_in_params'] = list(train_test.apply(lambda x: count_digit(x['params']), axis=1))
## get unicode features
count_unicode = lambda x: len([c for c in x if ord(c) > 1105])
count_distunicode = lambda x: len({c for c in x if ord(c) > 1105})
train_test['count_of_unicode_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_unicode_in_desc'] = list(train_test.apply(lambda x: count_unicode(x['description']), axis=1))
train_test['count_of_distuni_in_title'] = list(train_test.apply(lambda x: count_distunicode(x['title']), axis=1))
train_test['count_of_distuni_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
###
count_caps = lambda x: len([c for c in x if c.isupper()])
train_test['count_caps_in_title'] = list(train_test.apply(lambda x: count_caps(x['title']), axis=1))
train_test['count_caps_in_desc'] = list(train_test.apply(lambda x: count_caps(x['description']), axis=1))
import string
count_punct = lambda x: len([c for c in x if c in string.punctuation])
train_test['count_punct_in_title'] = list(train_test.apply(lambda x: count_punct(x['title']), axis=1))
train_test['count_punct_in_desc'] = list(train_test.apply(lambda x: count_punct(x['description']), axis=1))
print("Computed POS Features and others..")
train_test['count_common_nouns'] = train_test.apply(lambda x: len(set(str(x['get_nouns_title']).lower().split()).intersection(set(str(x['get_nouns_desc']).lower().split()))), axis=1)
train_test['count_common_adj'] = train_test.apply(lambda x: len(set(str(x['get_adj_title']).lower().split()).intersection(set(str(x['get_adj_desc']).lower().split()))), axis=1)
train_test['ratio_of_unicode_in_title'] = train_test['count_of_unicode_in_title'] / train_test['len_title']
train_test['ratio_of_unicode_in_desc'] = train_test['count_of_unicode_in_desc'] / train_test['len_description']
train_test['ratio_of_punct_in_title'] = train_test['count_punct_in_title'] / train_test['len_title']
train_test['ratio_of_punct_in_desc'] = train_test['count_punct_in_desc'] / train_test['len_description']
train_test['ratio_of_cap_in_title'] = train_test['count_caps_in_title'] / train_test['len_title']
train_test['ratio_of_cap_in_desc'] = train_test['count_caps_in_desc'] / train_test['len_description']
train_test['count_nouns_in_title'] = train_test["get_nouns_title"].apply(lambda x: len(x.split()))
train_test['count_nouns_in_desc'] = train_test['get_nouns_desc'].apply(lambda x: len(x.split()))
train_test['count_adj_in_title'] = train_test["get_adj_title"].apply(lambda x: len(x.split()))
train_test['count_adj_in_desc'] = train_test['get_adj_desc'].apply(lambda x: len(x.split()))
train_test['count_verb_title'] = train_test['get_verb_title'].apply(lambda x: len(x.split()))
train_test['count_verb_desc'] = train_test['get_verb_desc'].apply(lambda x: len(x.split()))
train_test['ratio_nouns_in_title'] = train_test["count_nouns_in_title"] / train_test["title_nwords"]
train_test['ratio_nouns_in_desc'] = train_test["count_nouns_in_desc"] / train_test["desc_nwords"]
train_test['ratio_adj_in_title'] = train_test["count_adj_in_title"] / train_test["title_nwords"]
train_test['ratio_adj_in_desc'] = train_test["count_adj_in_desc"] / train_test["desc_nwords"]
train_test['ratio_vrb_in_title'] = train_test["count_verb_title"] / train_test["title_nwords"]
train_test['ratio_vrb_in_desc'] = train_test["count_verb_desc"] / train_test["desc_nwords"]
train_test["title"]= list(train_test[["title"]].apply(lambda x: clean_text(x["title"]), axis=1))
train_test["description"]= list(train_test[["description"]].apply(lambda x: clean_text(x["description"]), axis=1))
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
#######################
### Save
#######################
train_df = train_test.loc[train_test.deal_probability != 10].reset_index(drop = True)
test_df = train_test.loc[train_test.deal_probability == 10].reset_index(drop = True)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
train_df.to_feather('../train_basic_features.pkl')
test_df.to_feather('../test__basic_features.pkl')
#######################
### Label Enc
#######################
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
cat_vars = ["user_id", "region", "city", "parent_category_name", "category_name", "user_type", "param_1", "param_2", "param_3"]
for col in cat_vars:
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
train_df.to_feather('../train_basic_features_lblencCats.pkl')
test_df.to_feather('../test__basic_features_lblencCats.pkl')
#######################
### One hots
#######################
train_df=pd.read_feather('../train_basic_features_lblencCats.pkl')
test_df=pd.read_feather('../test__basic_features_lblencCats.pkl')
from sklearn.externals import joblib
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.user_id.values.tolist() + test_df.user_id.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_id_onehot.pkl")
X = le.fit_transform(np.array(train_df.region.values.tolist() + test_df.region.values.tolist()).reshape(-1,1))
joblib.dump(X, "../region_onehot.pkl")
X = le.fit_transform(np.array(train_df.city.values.tolist() + test_df.city.values.tolist()).reshape(-1,1))
joblib.dump(X, "../city_onehot.pkl")
X = le.fit_transform(np.array(train_df.parent_category_name.values.tolist() + test_df.parent_category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../parent_category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.category_name.values.tolist() + test_df.category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.user_type.values.tolist() + test_df.user_type.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_type_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_1.values.tolist() + test_df.param_1.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_1_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_2.values.tolist() + test_df.param_2.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_2_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_3.values.tolist() + test_df.param_3.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_3_onehot.pkl")
train_df.drop(cat_vars, inplace = True, axis = 'columns')
test_df.drop(cat_vars, inplace = True, axis = 'columns')
train_df.to_feather('../train_basic_features_woCats.pkl')
test_df.to_feather('../test__basic_features_woCats.pkl')
#######################
### Tfidf
#######################
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf.pkl")
### TFIDF Vectorizer ###
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, #,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
### TFIDF Vectorizer ###
train_df['get_nouns_title'] = train_df['get_nouns_title'].fillna(' ')
test_df['get_nouns_title'] = test_df['get_nouns_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_title'].values.tolist() + test_df['get_nouns_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_title_tfidf.pkl")
del full_tfidf
print("TDIDF Title Noun..")
### TFIDF Vectorizer ###
train_df['get_nouns_desc'] = train_df['get_nouns_desc'].fillna(' ')
test_df['get_nouns_desc'] = test_df['get_nouns_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_desc'].values.tolist() + test_df['get_nouns_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Noun..")
### TFIDF Vectorizer ###
train_df['get_adj_title'] = train_df['get_adj_title'].fillna(' ')
test_df['get_adj_title'] = test_df['get_adj_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_title'].values.tolist() + test_df['get_adj_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Adj..")
### TFIDF Vectorizer ###
train_df['get_adj_desc'] = train_df['get_adj_desc'].fillna(' ')
test_df['get_adj_desc'] = test_df['get_adj_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_desc'].values.tolist() + test_df['get_adj_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Adj..")
### TFIDF Vectorizer ###
train_df['get_verb_title'] = train_df['get_verb_title'].fillna(' ')
test_df['get_verb_title'] = test_df['get_verb_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_title'].values.tolist() + test_df['get_verb_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Verb..")
### TFIDF Vectorizer ###
train_df['get_verb_desc'] = train_df['get_verb_desc'].fillna(' ')
test_df['get_verb_desc'] = test_df['get_verb_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_desc'].values.tolist() + test_df['get_verb_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Verb..")
###############################
# Sentence to seq
###############################
print('Generate Word Sequences')
train_df= | pd.read_feather('../train_basic_features_woCats.pkl') | pandas.read_feather |
# Project: fuelmeter-tools
# Created by: # Created on: 5/7/2020
from pandas.tseries.offsets import MonthEnd
from puma.Report import Report
import pandas as pd
import numpy as np
import puma.plot as pplot
import puma.tex as ptex
import datetime
import os
class MultiMonthReport(Report):
def __init__(self,start,end,title,nc,houses,monthly_fuel_price):
super(MultiMonthReport, self).__init__(start,end,title,nc,houses,monthly_fuel_price)
def getAveCostPerDay(self):
        '''calculates the average cost of fuel per day. If the attribute gpd_hdd
        is available it is used to calculate costs; otherwise the attribute
        fuel_by_day is used.'''
if 'gpd_hdd' not in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
else:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
return self.cost_per_day.mean()
def getCostPerDay(self,fuel_by_day):
'''calculate cost for each day based on a fuel price for each day and fuel consumption for each day'''
self.fuel_price.name = 'fuel_price'
df = pd.concat([fuel_by_day, self.fuel_price.groupby(pd.Grouper(freq='D')).mean()], axis=1)
df.fuel_price = df.fuel_price.ffill() # filled for days that did not match
return df.fuel_consumption * df.fuel_price
# def getEstimatedTotalGallons(self):
# '''calculates the total gallons used each month and sets the attribute gallons_by_month
# :return float total gallons for the entire report period'''
# self.estimated_gallons_by_month = self.calculateTotalGallonsByMonth()
# return self.gallons_by_month.sum()
def getCostPerMonth(self):
'''calculates the total cost of consumed fuel per month by summing cost per day for every day within a month'''
        if self.cost_per_day is None:
if 'gpd_hdd' in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
else:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
self.cost_per_month = self.cost_per_day.groupby(pd.Grouper(freq="M")).sum()
return
def getTotalCost(self):
'''uses hdd corrected estimate of fuel consumption to estimate cost per day and aggregate to the entire report period.'''
costPerDay = self.getCostPerDay(self.gpd_hdd)
return costPerDay.sum()
def calculateMeanDailyGallonsPerMonth(self):
'''Calculates the total gallons consumed by month based on an average daily consumption rate for each month'''
        # actual measured total by day; we require at least 5 records to produce a valid daily total
groupedDaily = self.filtered_df['fuel_consumption'].groupby(pd.Grouper(freq="D")).sum(min_count=5) #total gallons each day
#total days needing estimates
self.meanDailyByMonth = groupedDaily.groupby(pd.Grouper(freq='M')).agg(['mean','count']) #total daily gallons averaged over month
        self.meanDailyByMonth = self.meanDailyByMonth.loc[self.meanDailyByMonth['count'] >= 15, 'mean'] # drop months with fewer than 15 days of data
#estimatedTotalByMonth = self.meanDailyByMonth * self.meanDailyByMonth.index.days_in_month #use the average to calculate a total amount for the month
return
def calculateMeanGallonsPerMonth(self):
'''get the average gallons consumed for all months in the reporting period'''
tgpm = self.calculateTotalGallonsByMonth()
return tgpm.mean()
def getGallonsPerFt(self):
'''get the total gallons used in the report period per house area (square feet).
sets the aveGPFByYear attribute which is the totalGPF for each year averaged over all years.
:return float total gallons per house square footage for the report period'''
totalGPF = super().getGallonsPerFt()
AveDailyByYear = self.filtered_df['fuel_consumption'].groupby(pd.Grouper(freq='A')).mean()
self.aveGPFByYear = AveDailyByYear/self.area
return totalGPF
def makePlots(self):
'''produces pngs of plots specific to this report'''
os.chdir(self.name)
outDoor = self.ave_MonthlyoutdoorT['ave']
pplot.plotActualvsEstimated(self.actualGallonsByMonth, self.estimatedGallonsByMonth['total_fuel'] )
pplot.plot_multiyear_bar_progress_with_temperature(self.actualAverageGallonsPerHDDByMonth['average_gphdd'], outDoor[self.start:self.end],
'monthly_track_your_progress.png')
you = self.getMeanGallonsPerMonthPerAreaByYear()
you.name = 'you'
df = pd.concat([you, self.yearly_neighbor_ave_monthly_usage_per_area], join='inner', axis=1)
pplot.plot_annual_fuel_usage(df, 'fuel_usage.png')
gph = pd.DataFrame(self.gph,index = self.gph.index)
gph['season'] = 0
gph.loc[(gph.index.month >= 1) & (gph.index.month <= 3), 'season'] = 1
gph.loc[(gph.index.month >= 4) & (gph.index.month <= 6), 'season'] = 2
gph.loc[(gph.index.month >= 7) & (gph.index.month <= 9), 'season'] = 3
gph.loc[(gph.index.month >= 10) & (gph.index.month <= 12), 'season'] = 4
ave_gal_by_hour_by_season = gph.groupby([gph.season, gph.index.hour]).mean()
pplot.seasonal_polar_flow_plot(ave_gal_by_hour_by_season,
'seasonal_polar_plot.png')
os.chdir("..")
return
def getAveCostPerYear(self):
'''calculate the average cost per year based on the average daily cost for the report period'''
return self.ave_cost_per_day * 365
def getMeanGallonsPerMonthPerAreaByYear(self):
gpmpa = self.estimatedGallonsByMonth/self.area
gpmpa = pd.concat([gpmpa,self.actualGallonsByMonth], axis=1)
gpmpa = gpmpa[pd.notnull(gpmpa.iloc[:,1])] #estimate is only produced for months with at least 15 days of actual data
AverageGPMPerArea = gpmpa['total_fuel'].groupby(pd.Grouper(freq='A')).mean()
return AverageGPMPerArea
def getYearlyNeigborhoodUsagePerArea(self):
return self.neighborhood.getMeanMonthlyGPFByYear(self.houses)
def getNeighborhoodUsagePerArea(self):
return self.neighborhood.getUsageTable([])
def compare2Neighbors(self):
'''generate neighborhood metrics'''
super().compare2Neighbors()
self.yearly_neighbor_ave_monthly_usage_per_area, self.yearly_neighbor_usage_std_per_area =self.getYearlyNeigborhoodUsagePerArea()
self.neighborhoodUsage = self.getNeighborhoodUsagePerArea()
return
def generateSummaryTable(self,cost):
'''create a summary table of fuel usage, costs and temperatures by month'''
combinedData = pd.concat([np.round(self.estimatedGallonsByMonth['total_fuel'],2), self.estimatedGallonsByMonth['sample_size'],np.round(self.meanDailyByMonth,4), np.round(self.ave_MonthlyindoorT['ave'], 0),
np.round(self.ave_MonthlyoutdoorT['ave'], 0)], axis=1)
combinedData = combinedData[:self.estimatedGallonsByMonth.index[-1]]
combinedData.columns = ['total_gal_by_month','sample_size','ave_daily_by_month','ave_indoor_t_by_month','ave_outdoor_t_by_month']
combinedData.loc[pd.isnull(combinedData['ave_indoor_t_by_month']), 'ave_daily_by_month'] = np.nan
combinedData['ave_daily_cost_by_month'] = np.round(combinedData['ave_daily_by_month'] * cost,2)
combinedData['total_cost_by_month'] = np.round(combinedData['total_gal_by_month'] * cost,2)
#self.estimatedCostByMonth = combinedData['total_cost_by_month']
combinedData['month_year'] = [datetime.datetime.strftime(pd.to_datetime(i),format="%b %y") for i in combinedData.index]
combinedData['total_cost_by_month'] = combinedData['total_cost_by_month'].map('\${:,.2f}'.format)
combinedData['ave_daily_cost_by_month'] = combinedData['ave_daily_cost_by_month'].map('\${:,.2f}'.format)
combinedData = combinedData[self.reportRange[0]:self.reportRange[-1]]
combinedData = combinedData.astype(str)
#combinedData = combinedData.astype(dtype=pd.StringDtype())
combinedData.loc[combinedData['ave_daily_by_month'] != 'nan', 'ave_daily_by_month'] = combinedData.loc[combinedData['ave_daily_by_month'] != 'nan', 'ave_daily_by_month'] + " (" + combinedData.loc[combinedData['ave_daily_by_month'] != 'nan', 'sample_size'] + ")"
subset = combinedData[['month_year','ave_daily_by_month','ave_daily_cost_by_month','total_gal_by_month', 'total_cost_by_month','ave_indoor_t_by_month','ave_outdoor_t_by_month']]
myTable = [tuple(x) for x in subset.to_numpy()]
return myTable
def generateHighMonths(self):
'''calculate which months are in the 90th percentile for fuel consumption for the entier report period based on gallons_by_month attribute
:return list of string month names'''
highValue = np.percentile(self.estimatedGallonsByMonth , 90)
highMonths = self.estimatedGallonsByMonth [self.estimatedGallonsByMonth > highValue].index.month
if len(highMonths) > 0:
return [datetime.datetime.strftime(datetime.datetime(2021, h, 1), format="%B") for h in highMonths]
else:
return None
def generateMetrics(self):
super().generateMetrics() #generate all the metrics used in monthly reports
self.calculateMeanDailyGallonsPerMonth() #gpm is an estimated average per month
self.aveYearlyCost = self.getAveCostPerYear()
firstIndex = self.indoorTData[ | pd.notnull(self.indoorTData['inT']) | pandas.notnull |
#!/usr/bin/env python3
"""
Dependencies required to run program.
- python3.6+
- argparse
- pandas >= 0.22.4
- pysam==0.15.2
"""
import sys
import pandas as pd
from primer_tk import constants
def add_tabix_subparser(subparser):
"""
    Build the "tabix" subcommand parser and register it with the given subparsers.
    Args:
        subparser (argparse._SubParsersAction): the subparsers object to attach to.
    Returns:
        parser (argparse.ArgumentParser): the configured tabix subparser.
"""
parser = subparser.add_parser("tabix", help="Tabix subparser")
parser.add_argument("-vcf", "--variant-call-file", dest="vcf",
help="Tabix indexed VCF.")
parser.add_argument("-in", "--primer-input-file", dest="p_info",
help="The output of the primer pipeline.")
parser.add_argument("-o", "--output", dest="output",
help="The name of the output file")
return parser
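# Illustrative wiring of the subparser above (program name and file paths are
# placeholders, not taken from the package):
#
#   import argparse
#   parser = argparse.ArgumentParser(prog="primer_tk")
#   subparsers = parser.add_subparsers(dest="command")
#   add_tabix_subparser(subparsers)
#   args = parser.parse_args(["tabix", "-vcf", "calls.vcf.gz",
#                             "-in", "total_primers.csv", "-o", "tagged.csv"])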
def create_tabix_df(primer_pipeline_output):
"""
Takes output of primer pipeline and generates dataframe.
Args:
        primer_pipeline_output (str): path to the output file of the primer pipeline
    Returns:
        primer_df (pd.DataFrame): the primer information loaded into a dataframe
"""
primer_df = | pd.read_csv(primer_pipeline_output, header=0) | pandas.read_csv |
# -*- coding: UTF-8 -*-
# @Time : 2021/1/20
# @Author : <EMAIL>
# Apache License
# Copyright©2020-2021 <EMAIL> All Rights Reserved
import json
import pandas as pd
from styleframe import StyleFrame
from tqdm import tqdm
from data_process.change_data_format_unit import read_semeval_list, split_data, choose_key, cut_text, get_slice_line, ambiguity
def get_excel(whole_ann):
out_datas = [['paragraph ID', 'sentence ID', 'sentence', 'sliced info', 'sliced sent',
'quantity', 'quantity_line', 'unit', 'mod',
'property', 'property_line', 'entity', 'entity_line']]
for ann in tqdm(whole_ann):
slice_text, slice_ids, slice = cut_text(ann['text'])
id = ann['id']
text_idx = json.dumps(ann['sentx'])
raw_text = ann['text']
line_idx = json.dumps(slice_ids)
slice_text = slice_text
if len(ann['excel']) == 0:
out_datas.append([
id, text_idx, raw_text, line_idx, slice_text,
'', '', '', '', '', '', '', ''
])
for i, caseID in enumerate(ann['excel']):
case = ann['excel'][caseID]
quantity = ''
quantity_line = ''
unit = ''
mod = ''
property = ''
property_line = ''
entity = ''
entity_line = ''
if 'Quantity' in case:
quantity = case['Quantity'][0]
if ambiguity(raw_text, quantity):
quantity_line = get_slice_line(case['Quantity'][1], slice_ids)
if 'unit' in case:
unit = case['unit']
if 'mods' in case:
mod = ' '.join(case['mods'])
if 'MeasuredProperty' in case:
property = case['MeasuredProperty'][0]
if ambiguity(raw_text, property):
property_line = get_slice_line(case['MeasuredProperty'][1], slice_ids)
if 'MeasuredEntity' in case:
entity = case['MeasuredEntity'][0]
if ambiguity(raw_text, entity):
entity_line = get_slice_line(case['MeasuredEntity'][1], slice_ids)
if i == 0:
out_datas.append([
id, text_idx, raw_text, line_idx, slice_text,
quantity, quantity_line, unit, mod, property, property_line, entity, entity_line
])
else:
out_datas.append([
'', '', '', '', '',
quantity, quantity_line, unit, mod, property, property_line, entity, entity_line
])
return out_datas
def generate_gold():
path_text = [
'../MeasEval/data/train/text',
'../MeasEval/data/trial/txt'
    ] # location of the input text data
path_tsv = [
'../MeasEval/data/train/tsv',
'../MeasEval/data/trial/tsv'
    ] # location of the train input tsv data
whole_ann = read_semeval_list(path_tsv, path_text)
whole_ann = split_data(whole_ann)
out_datas = get_excel(whole_ann)
ds = pd.DataFrame(out_datas)
StyleFrame(ds).to_excel('data_enhancement/train.xlsx', index=False, header=False).save()
def generate_test():
path_text = [
'../MeasEval/data/eval/text',
    ] # location of the input text data
path_tsv = [
'../ner_process/pre_tsv_format',
    ] # location of the predicted tsv input data
whole_ann = read_semeval_list(path_tsv, path_text)
whole_ann = split_data(whole_ann)
out_datas = get_excel(whole_ann)
ds = | pd.DataFrame(out_datas) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 5 15:33:50 2019
@author: luc
"""
#%% Import Libraries
import numpy as np
import pandas as pd
import itertools
from stimuli_dictionary import cued_stim, free_stim, cued_stim_prac, free_stim_prac
def randomize(ID, Age, Gender, Handedness):
'''
Create a randomized and counterbalanced stimulus list for the current participant
Parameters
----------
ID : INT
The subject ID. Based on the subject ID the correct counterbalancing is determined
Returns
-------
design : Pandas DataFame
The dataframe containing the complete stimulus list (including practice trials)
keys: Dictionary
the response keys for the free phase
'''
#%% Variables
# experiment variables
nBlocks = 6
Phases = ['prac_cued', 'prac_free', 'cued', 'free']
nstim = 60 # sample 60 stim from each target_type
# sample from main stimulus set without replacement
# randomize word targets to avoid relationship reward - stimulus
for idx, name in enumerate(['lism','lila','nosm','nola']):
cued_stim[name] = np.random.choice(cued_stim[name], size = nstim, replace = False)
wide_cued = pd.DataFrame(cued_stim); wide_free = pd.DataFrame(free_stim)
wide_cued_prac = | pd.DataFrame(cued_stim_prac) | pandas.DataFrame |
# demographics_etl.py
#######
# This class provides capabilities to extract, transform,
# and load data from student, staff, and school geographic
# data files that it downloads from the web.
######
import pandas as pd
import numpy as np
import os
import datetime
import urllib.parse
import urllib.request
import shutil
import logging
import pyodbc
import pypyodbc
import sqlalchemy as sa
import keyring
import yaml
import pprint as pp
import time
class DemographicsETL():
def __init__(self,config_file_path,log_folder_name):
"""
Initialize ETL process by preparing logging,
reading in configuration file, and
creating a data files folder, if it does not yet
exist.
"""
pd.options.mode.chained_assignment = None
self.setup_logging(folder_name=log_folder_name)
config_map = self.load_configuration(config_file_path)
self.create_folder(folder_name=self.datafiles_folder)
def load_configuration(self,file_path):
"""
Load data from the configuration file from the specified path
into instance variables.
Keyword arguments:
file_path - the path to the configuration file from
the current folder.
"""
try:
logging.info("Using configuration file {}".format(file_path))
file = open(file_path)
            config_map = yaml.safe_load(file)
file.close()
# Set instance variables with settings from configuration file.
self.datafiles_folder = config_map['Data Folder Name']
self.database_name = config_map['Database']['Name']
self.database_driver = config_map['Database']['Driver']
self.database_server = config_map['Database']['Server']
self.database_username = config_map['Database']['Username']
self.database_schema = config_map['Database']['Schema']
self.student_demographics_url = config_map['Student Demographics URL']
self.staff_demographics_url = config_map['Staff Demographics URL']
self.school_geography_url = config_map['School Geography URL']
self.source_staff_table = config_map['Source Staff Table Name']
self.staging_student_table = config_map['Staging Student Table Name']
self.staging_staff_table = config_map['Staging Staff Table Name']
self.staging_school_geography_table = config_map['Staging School Geography Table Name']
self.school_district_ids = config_map['School District IDs']
self.n_less_than_10 = config_map['Replacement for n<10']
self.more_than_95 = config_map['Replacement for >95%']
except IOError:
logging.error("Unable to read configuration from file. Exiting program.")
exit(1)
except KeyError as key:
logging.error("Key missing from configuration from file: {}. Exiting program.".format(key))
exit(1)
except:
logging.error("Unknown configuration file error. Exiting program.")
exit(1)
logging.info('Configuration has been loaded.')
return config_map
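    # For reference, a minimal YAML configuration matching the keys read above could
    # look like the sketch below (all values are placeholders, not project settings):
    #
    #   Data Folder Name: data
    #   Database:
    #     Name: Demographics
    #     Driver: ODBC Driver 17 for SQL Server
    #     Server: dbserver
    #     Username: etl_user
    #     Schema: dbo
    #   Student Demographics URL: https://example.org/students.txt
    #   Staff Demographics URL: https://example.org/staff.accdb
    #   School Geography URL: https://example.org/schools.csv
    #   Source Staff Table Name: staff_assignments
    #   Staging Student Table Name: stg_student_demographics
    #   Staging Staff Table Name: stg_staff_demographics
    #   Staging School Geography Table Name: stg_school_geography
    #   School District IDs: ['17001']
    #   Replacement for n<10: 5
    #   Replacement for >95%: 97.5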
def setup_logging(self,folder_name):
"""
Create a folder to store the log, if one does not yet exist,
then initialize the logger for logging to both the console
and the file in the log folder.
Keyword arguments:
folder_name - the name of the folder for storing the log file
"""
# Create folder to store log file if folder does not exist already
self.create_folder(folder_name)
# Configure logger with more verbose format to write to log file
log_file_path = folder_name+'/'+'demographics_etl.log'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d-%Y %H:%M:%S',
filename=log_file_path,
filemode='w')
        # Create a console handler
console = logging.StreamHandler()
# Write to console any messages that are info or higher priority
console.setLevel(logging.INFO)
# Specify simpler format to write to console
formatter = logging.Formatter('%(levelname)-8s %(message)s')
# Assign the format for console
console.setFormatter(formatter)
# Add the handler to the root logger
logging.getLogger('').addHandler(console)
# Start logging
logging.info('Starting demographics_etl.py.')
logging.info("Logging has been set up with log file located at {}.".format(log_file_path))
def create_folder(self,folder_name):
"""
Create the specified folder if it does not yet exists.
Keyword arguments:
folder_name - the name of the folder to create
"""
# Create folder to store files if folder does not already exist
os.makedirs(folder_name,exist_ok=True)
def download_data(self):
"""
Download student and staff demographic data and school
geographic data from URLs defined in instance variables,
assigned based on the configuration file settings.
"""
self.engine = self.connect_database()
self.student_demographics_file = self.staff_demographics_file = ''
if self.student_demographics_url:
self.student_demographics_file = self.download_file(self.student_demographics_url)
if self.staff_demographics_url:
self.staff_demographics_file = self.download_file(self.staff_demographics_url)
if self.school_geography_url:
self.school_geography_file = self.download_file(self.school_geography_url)
def download_file(self,url):
"""
Download the file from the url provided and save it locally to the folder for data files
using its original file name. Any existing file with that file name and location will be
overwritten.
Keyword arguments:
url - the URL of the file to download
"""
output_filepath = self.datafiles_folder + '/' + url[url.rfind("/")+1:]
# Download the file from the url and save it to the data files folder.
try:
with urllib.request.urlopen(url) as response, open(output_filepath, 'wb') as output_file:
shutil.copyfileobj(response, output_file)
except:
logging.error("Unable to download file from {}. Exiting program.".format(url))
exit(1)
logging.info("Downloaded file to {}".format(output_filepath))
return output_filepath
def connect_database(self):
"""
Acquire the database password using keyring and prepare a connection to the database
used for storing demographic information.
"""
# Get password from keyring
password = keyring.get_password(self.database_name, self.database_username)
# Connect to the database
params = urllib.parse.quote_plus("DRIVER={{{0}}};SERVER={1};DATABASE={2};UID={3};PWD={4};autocommit=True;".format(self.database_driver,
self.database_server,self.database_name, self.database_username,password))
try:
engine = sa.create_engine("mssql+pyodbc:///?odbc_connect={}".format(params))
logging.info("Prepared connection to {} database.".format(self.database_name))
except:
logging.error("Unable to prepare connection to {} database. Exiting program.".format(self.database_name))
exit(1)
return engine
def extract_data(self):
"""
Call methods to extract student and staff demographic information
and school geographic information from downloaded source files.
"""
self.extract_student_demographics_data()
self.extract_staff_demographics_data()
self.extract_school_geography_data()
def extract_student_demographics_data(self):
"""
Extract data from student demographics file, a tab-delimited text file,
to a Pandas dataframe for further processing.
"""
try:
self.student_demographics_df = pd.read_table(self.student_demographics_file, sep='\t', header=0, index_col=False)
except:
logging.error("Unable to read file from {}. Exiting program.".format(self.student_demographics_file))
exit(1)
logging.info("Extracted student demographics data from file {file}. {df} rows of data found.".format(file=self.student_demographics_file,
df = self.student_demographics_df.shape[0]))
def extract_staff_demographics_data(self):
"""
Extract data from staff demographics file, which is an Access database,
to a Pandas dataframe for further processing.
"""
connection_string = "DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={0}/{1}".format(os.getcwd().replace('\\','/'),self.staff_demographics_file)
logging.info("Attempting to connect to staff demographics Access database with the following connection: {}".format(connection_string))
connection = pypyodbc.connect(connection_string)
quoted_district_ids = ','.join(map("'{}'".format, self.school_district_ids))
query = (r"SELECT SchoolYear,codist,cert,sex,hispanic,race,hdeg,certfte,certflag,recno,prog,act,bldgn,asspct,assfte,yr "
r"FROM [{source_table}] "
r"WHERE act = '27' " # Activity code 27 means a teaching assignment
r"AND assfte > 0 " # Must be at least part of the staff member's assignment FTE
r"AND codist IN ({district_ids});".format(source_table=self.source_staff_table,district_ids=quoted_district_ids))
try:
self.staff_demographics_df = | pd.read_sql(query, connection) | pandas.read_sql |
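# --- Hedged example (not part of the original source) ------------------------
# A minimal, self-contained sketch of the "query into a DataFrame" pattern used
# above with pandas.read_sql. An in-memory SQLite engine and a toy `staff`
# table stand in for the Access/ODBC connection and source table; both are
# assumptions made purely for illustration.
import pandas as pd
import sqlalchemy as sa

engine = sa.create_engine("sqlite:///:memory:")
with engine.begin() as conn:
    # Create and populate a tiny stand-in table.
    conn.execute(sa.text("CREATE TABLE staff (codist TEXT, act TEXT, assfte REAL)"))
    conn.execute(sa.text("INSERT INTO staff VALUES ('17001', '27', 0.5), ('17001', '27', 1.0)"))

# Same filtering idea as the query above: teaching assignments with a positive FTE.
query = "SELECT codist, act, assfte FROM staff WHERE act = '27' AND assfte > 0"
staff_demographics_df = pd.read_sql(query, engine)
print(staff_demographics_df.shape)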
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = | tm.makeTimeDataFrame() | pandas.util.testing.makeTimeDataFrame |
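# --- Hedged example (not part of the original test suite) ---------------------
# A minimal sketch of the HDFStore append/select round-trip exercised by the
# tests above. The file name is arbitrary and the PyTables package must be
# installed for this to run.
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": np.arange(10.0), "B": list("abcdefghij")})
with pd.HDFStore("example_store.h5", mode="w") as store:
    # format="table" allows appending and querying on data columns.
    store.append("df", df.iloc[:5], format="table", data_columns=["A"])
    store.append("df", df.iloc[5:])
    subset = store.select("df", where="A > 6")
print(subset)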
"""Integration tests for the HyperTransformer."""
import re
from copy import deepcopy
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from rdt import HyperTransformer
from rdt.errors import Error, NotFittedError
from rdt.transformers import (
DEFAULT_TRANSFORMERS, BaseTransformer, BinaryEncoder, FloatFormatter, FrequencyEncoder,
OneHotEncoder, UnixTimestampEncoder, get_default_transformer, get_default_transformers)
class DummyTransformerNumerical(BaseTransformer):
INPUT_SDTYPE = 'categorical'
OUTPUT_SDTYPES = {
'value': 'float'
}
def _fit(self, data):
pass
def _transform(self, data):
return data.astype(float)
def _reverse_transform(self, data):
return data.astype(str)
class DummyTransformerNotMLReady(BaseTransformer):
INPUT_SDTYPE = 'datetime'
OUTPUT_SDTYPES = {
'value': 'categorical',
}
def _fit(self, data):
pass
def _transform(self, data):
# Stringify input data
return data.astype(str)
def _reverse_transform(self, data):
return data.astype('datetime64')
TEST_DATA_INDEX = [4, 6, 3, 8, 'a', 1.0, 2.0, 3.0]
def get_input_data():
datetimes = pd.to_datetime([
'2010-02-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical': ['a', 'a', 'b', 'b', 'a', 'b', 'a', 'a'],
'bool': [False, False, False, True, False, False, True, False],
'datetime': datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
return data
def get_transformed_data():
datetimes = [
1.264982e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18,
1.262304e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18
]
return pd.DataFrame({
'integer.value': [1, 2, 1, 3, 1, 4, 2, 3],
'float.value': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical.value': [0.3125, 0.3125, .8125, 0.8125, 0.3125, 0.8125, 0.3125, 0.3125],
'bool.value': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
'datetime.value': datetimes,
'names.value': [0.3125, 0.75, 0.75, 0.3125, 0.3125, 0.9375, 0.3125, 0.3125]
}, index=TEST_DATA_INDEX)
def get_reversed_data():
data = get_input_data()
data['bool'] = data['bool'].astype('object')
return data
DETERMINISTIC_DEFAULT_TRANSFORMERS = deepcopy(DEFAULT_TRANSFORMERS)
DETERMINISTIC_DEFAULT_TRANSFORMERS['categorical'] = FrequencyEncoder
@patch('rdt.transformers.DEFAULT_TRANSFORMERS', DETERMINISTIC_DEFAULT_TRANSFORMERS)
def test_hypertransformer_default_inputs():
"""Test the HyperTransformer with default parameters.
This tests that if default parameters are provided to the HyperTransformer,
the ``default_transformers`` method will be used to determine which
transformers to use for each field.
Setup:
- Patch the ``DEFAULT_TRANSFORMERS`` to use the ``FrequencyEncoder``
for categorical sdtypes, so that the output is predictable.
Input:
- A dataframe with every sdtype.
- A fixed random seed to guarantee the sampled values are null.
Expected behavior:
- The transformed data should contain all the ML ready data.
- The reverse transformed data should be the same as the input.
"""
# Setup
datetimes = pd.to_datetime([
np.nan,
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, np.nan, 0.1, 0.4, np.nan, 0.3],
'categorical': ['a', 'a', np.nan, 'b', 'a', 'b', 'a', 'a'],
'bool': [False, np.nan, False, True, False, np.nan, True, False],
'datetime': datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.fit(data)
transformed = ht.transform(data)
reverse_transformed = ht.reverse_transform(transformed)
# Assert
expected_datetimes = [
1.263069e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18,
1.262304e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18
]
expected_transformed = pd.DataFrame({
'integer.value': [1, 2, 1, 3, 1, 4, 2, 3],
'float.value': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical.value': [0.3125, 0.3125, 0.9375, 0.75, 0.3125, 0.75, 0.3125, 0.3125],
'bool.value': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
'datetime.value': expected_datetimes,
'names.value': [0.3125, 0.75, 0.75, 0.3125, 0.3125, 0.9375, 0.3125, 0.3125]
}, index=TEST_DATA_INDEX)
pd.testing.assert_frame_equal(transformed, expected_transformed)
reversed_datetimes = pd.to_datetime([
'2010-01-09 20:34:17.142857216',
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
expected_reversed = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, 0.20000000000000004, 0.1, 0.4, 0.20000000000000004, 0.3],
'categorical': ['a', 'a', np.nan, 'b', 'a', 'b', 'a', 'a'],
'bool': [False, False, False, True, False, False, True, False],
'datetime': reversed_datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
for row in range(reverse_transformed.shape[0]):
for column in range(reverse_transformed.shape[1]):
expected = expected_reversed.iloc[row, column]
actual = reverse_transformed.iloc[row, column]
assert pd.isna(actual) or expected == actual
assert isinstance(ht._transformers_tree['integer']['transformer'], FloatFormatter)
assert ht._transformers_tree['integer']['outputs'] == ['integer.value']
assert isinstance(ht._transformers_tree['float']['transformer'], FloatFormatter)
assert ht._transformers_tree['float']['outputs'] == ['float.value']
assert isinstance(ht._transformers_tree['categorical']['transformer'], FrequencyEncoder)
assert ht._transformers_tree['categorical']['outputs'] == ['categorical.value']
assert isinstance(ht._transformers_tree['bool']['transformer'], BinaryEncoder)
assert ht._transformers_tree['bool']['outputs'] == ['bool.value']
assert isinstance(ht._transformers_tree['datetime']['transformer'], UnixTimestampEncoder)
assert ht._transformers_tree['datetime']['outputs'] == ['datetime.value']
assert isinstance(ht._transformers_tree['names']['transformer'], FrequencyEncoder)
assert ht._transformers_tree['names']['outputs'] == ['names.value']
get_default_transformers.cache_clear()
get_default_transformer.cache_clear()
def test_hypertransformer_field_transformers():
"""Test the HyperTransformer with ``field_transformers`` provided.
This tests that the transformers specified in the ``field_transformers``
argument are used. Any output of a transformer that is not ML ready (not
in the ``_transform_output_sdtypes`` list) should be recursively transformed
till it is.
Setup:
- The datetime column is set to use a dummy transformer that stringifies
the input. That output is then set to use the categorical transformer.
Input:
- A dict mapping each field to a transformer.
- A dataframe with every sdtype.
Expected behavior:
- The transformed data should contain all the ML ready data.
- The reverse transformed data should be the same as the input.
"""
# Setup
config = {
'sdtypes': {
'integer': 'numerical',
'float': 'numerical',
'categorical': 'categorical',
'bool': 'boolean',
'datetime': 'datetime',
'names': 'categorical'
},
'transformers': {
'integer': FloatFormatter(missing_value_replacement='mean'),
'float': FloatFormatter(missing_value_replacement='mean'),
'categorical': FrequencyEncoder,
'bool': BinaryEncoder(missing_value_replacement='mode'),
'datetime': DummyTransformerNotMLReady,
'names': FrequencyEncoder
}
}
data = get_input_data()
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.set_config(config)
ht.fit(data)
transformed = ht.transform(data)
reverse_transformed = ht.reverse_transform(transformed)
# Assert
expected_transformed = get_transformed_data()
rename = {'datetime.value': 'datetime.value.value'}
expected_transformed = expected_transformed.rename(columns=rename)
transformed_datetimes = [0.8125, 0.8125, 0.3125, 0.3125, 0.3125, 0.8125, 0.3125, 0.3125]
expected_transformed['datetime.value.value'] = transformed_datetimes
pd.testing.assert_frame_equal(transformed, expected_transformed)
expected_reversed = get_reversed_data()
| pd.testing.assert_frame_equal(expected_reversed, reverse_transformed) | pandas.testing.assert_frame_equal |
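# --- Hedged example (not part of the original test suite) ---------------------
# A minimal sketch of the HyperTransformer workflow that the tests above
# verify. The toy dataframe and its column names are assumptions made only
# for illustration.
import pandas as pd
from rdt import HyperTransformer

data = pd.DataFrame({
    "integer": [1, 2, 3, 4],
    "categorical": ["a", "b", "a", "b"],
})
ht = HyperTransformer()
ht.detect_initial_config(data)      # infer sdtypes and default transformers
ht.fit(data)
transformed = ht.transform(data)    # numeric, ML-ready columns (e.g. 'integer.value')
recovered = ht.reverse_transform(transformed)
print(transformed.columns.tolist())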
#!/usr/bin/env python
###
# File Created: Wednesday, February 6th 2019, 9:05:13 pm
# Author: <NAME> <EMAIL>
# Modified By: <NAME>
# Last Modified: Friday, February 8th 2019, 1:06:21 pm
###
import os
from os.path import isfile, join, split
import glob
import pandas as pd
# Timeseries data
import datetime
import numpy as np
from pandas.tseries.frequencies import to_offset
def get_avg_losses():
"""Returns pandas df of average losses over all runs of all files
Returns:
pd.DataFrame -- df of averages over all runs of all files
"""
# Returns pd df of fullpaths, paths and filenames
files = files_to_df('../output/final/*/*.csv')
list_num_files('../output/final')
# Iterate over all runs for every unique filename,
# resample df to 2S interval,
# create losses df,
# concat mean to avg loss df
print('List of filenames:')
print('---------------------------')
avg_losses = pd.DataFrame()
for fn in files['filename'].unique():
name = fn.split('.')[0]
print(fn)
paths = files[files['filename'] == fn]['fullpath']
losses = pd.DataFrame()
benchmark_losses = | pd.DataFrame() | pandas.DataFrame |
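# --- Hedged example (not part of the original script) -------------------------
# A small sketch of the resample-then-average step described in the comments
# above: each run's loss curve is resampled to a 2 second grid and the runs are
# averaged column-wise. The synthetic index and the 'loss' column name are
# assumptions for illustration.
import numpy as np
import pandas as pd

index = pd.date_range("2019-02-06", periods=20, freq="1S")
runs = [pd.DataFrame({"loss": np.random.rand(20)}, index=index) for _ in range(3)]

resampled = [run.resample("2S").mean() for run in runs]
stacked = pd.concat(resampled, axis=1)          # one column per run
avg_loss = stacked.mean(axis=1).rename("avg_loss")
print(avg_loss.head())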
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
class ScrapingPlayer:
# table header transfermarkt
table_structure_tm = {
"matchday" : 0,
"date" : 1,
"home_team_pos": 2,
"home_team" : 3,
"away_team_pos" : 4,
"away_team" : 5,
"result" : 6,
"available" : 7,
"goal" : 8,
"assist" : 9,
"autogoal" : 10,
"yellow_card" : 11,
"double_yellow_card" : 12,
"red_card" : 13,
"substitution_on" : 14,
"substitution_off" : 15,
"minutes" : 16,
"penalty_kicker" : None
}
# table header fantagiaveno
table_structure_fg = {
"matchday": 0,
"home_match": 1,
"score": 2,
"grade": 3,
"penalty_scored": None,
"penalty_kick": 9,
"starter": 10,
"postponed": None
}
# rank at end of first round
rank_difficulty = {
"Milan" : 1,
"Inter" : 1,
"Juventus": 1,
"Roma": 2,
"Atalanta" : 2,
"Napoli" : 2,
"Lazio" : 2,
"Verona": 3,
"Sassuolo" : 3,
"Sampdoria" : 3,
"Benevento": 4,
"Fiorentina": 4,
"Bologna" : 4,
"Spezia": 4,
"Udinese" : 4,
"Genoa" : 5,
"Cagliari": 5,
"Torino" : 5,
"Parma" : 5,
"Crotone" : 5
}
# penalty kickers
penalty_kickers = {
"Milan" : "ibrahimovic",
"Inter" : "lukaku",
"Juventus": "<NAME>",
"Roma": "veretout",
"Atalanta" : "ilicic",
"Napoli" : "insigne",
"Lazio" : "immobile",
"Verona": "kalinic",
"Sassuolo" : "berardi",
"Sampdoria" : "quagliarella",
"Benevento": "viola",
"Fiorentina": "vlahovic",
"Bologna" : "orsolini",
"Spezia": "nzola",
"Udinese" : "de paul",
"Genoa" : "criscito",
"Cagliari": "<NAME>",
"Torino" : "belotti",
"Parma" : "kucka",
"Crotone" : "simy"
}
def __init__(self,
url_tm,
url_fg):
self.url_tm = url_tm
self.url_fg = url_fg
def readRowTm(self, player_name, row, table_structure=table_structure_tm, penalty_kickers=penalty_kickers):
# Fill a list with each cell contained in the row
cells = row.find_all("td")
cells = list(map(lambda x: x.text.strip(), cells))
# Ignore empty rows
if len(cells) == 0:
return None
else:
matchday = int(cells[table_structure["matchday"]])
date = cells[table_structure["date"]]
home_team_raw = cells[table_structure["home_team"]]
away_team_raw = cells[table_structure["away_team"]]
try:
home_team_name, home_team_pos = home_team_raw.split("\xa0\xa0")
home_team_pos = int(home_team_pos[1:-2])
away_team_name, away_team_pos = away_team_raw.split("\xa0\xa0")
away_team_pos = int(away_team_pos[1:-2])
except ValueError:
home_team_name = home_team_raw
home_team_pos = None
away_team_name = away_team_raw
away_team_pos = None
result = cells[table_structure["result"]]
# If a row contains fewer cells than the overall table structure, it means that the player is unavailable
if len(cells) < len(table_structure)-1:
available = cells[table_structure["available"]]
return [matchday, date, home_team_pos, home_team_name, away_team_pos, away_team_name, result, available,
0, 0, 0, False, False, False, '', '', 0, False]
else:
available = 'Available'
goal = 0 if cells[table_structure["goal"]] == '' else int(cells[table_structure["goal"]])
assist = 0 if cells[table_structure["assist"]] == '' else int(cells[table_structure["assist"]])
autogoal = 0 if cells[table_structure["autogoal"]] == '' else int(cells[table_structure["autogoal"]])
yellow_card = False if cells[table_structure["yellow_card"]] == '' else True
double_yellow_card = False if cells[table_structure["double_yellow_card"]] == '' else True
red_card = False if cells[table_structure["red_card"]] == '' else True
substitution_on = cells[table_structure["substitution_on"]][:-1]
substitution_off = cells[table_structure["substitution_off"]][:-1]
minutes = int(cells[table_structure["minutes"]][:-1])
penalty_kicker = True if (player_name.lower() in penalty_kickers.values() and minutes > 0) else False
return [matchday, date, home_team_pos, home_team_name, away_team_pos, away_team_name, result, available,
goal, assist, autogoal, yellow_card, double_yellow_card, red_card, substitution_on,
substitution_off, minutes, penalty_kicker]
def readRowFg(self, row, table_structure=table_structure_fg):
# Fill a list with each cell contained in the row
cells = row.find_all("td")
cells = list(map(lambda x: x.text.strip(), cells))
# Ignore empty rows
if len(cells) == 0:
return None
else:
postponed = False
# Match day without final letter
matchday = int(cells[table_structure["matchday"]][:-1])
# C/T becomes True if it is C, otherwise False
home_match = True if cells[table_structure["home_match"]] == "c" else False
# Scores considering - as NaN scores
scores = cells[table_structure["score"]].strip()
if scores == "-":
scores = np.nan
elif scores[-1] == "*":
scores = float(scores[:-1].strip())
postponed = True
else:
scores = float(scores.replace(",", "."))
# Grade considering - as NaN
grade = cells[table_structure["grade"]].strip()
if grade == "-":
grade = np.nan
else:
grade = float(grade.replace(",", "."))
# Penalty: 0 when the penalty field is -
penalty = cells[table_structure["penalty_kick"]].strip()
penalty_scored, penalty_kick = penalty.split("/")
if penalty_scored.strip() == "-":
penalty_scored = 0
else:
penalty_scored = int(penalty_scored.strip())
if penalty_kick.strip() == "-":
penalty_kick = 0
else:
penalty_kick = int(penalty_kick.strip())
# Starter is considered as a boolean variable
starter_str = cells[table_structure["starter"]].strip()
if starter_str == "x":
starter = True
else:
starter = False
return [matchday, home_match, scores, grade, penalty_scored, penalty_kick, starter, postponed]
def getData(self, up_to_matchday = 38):
"""
To make the request to the page we have to inform the
website that we are a browser and that is why we
use the headers variable
"""
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'}
# ------------------------------------------------------------------------#
#------------------- START SCRAPING TRANSFERMARKT ------------------------#
# ------------------------------------------------------------------------#
# In the response variable we will the download of the web page
response_tm = requests.get(self.url_tm, headers=headers)
"""
Now we will create a BeautifulSoup object from our response.
The 'html.parser' parameter tells BeautifulSoup which parser to use when creating our object;
a parser is software responsible for converting an input into a data structure.
"""
page_bs_tm = BeautifulSoup(response_tm.content, 'html.parser')
# find name of the player
player_name = page_bs_tm.find("div", class_="dataName").find("b").text
serie_a_table_tm = None
# The find_all () method is able to return all tags that meet restrictions within parentheses
# The find() function will find the first table whose class is "responsive-table"
for box in page_bs_tm.find_all("div", class_="box"):
if box.find("a", {"name":"IT1"}) != None:
serie_a_table_tm = box.find("div", class_="responsive-table").find("tbody").find_all("tr")
# Creating a DataFrame with our data
df_tm = pd.DataFrame(columns=self.table_structure_tm.keys())
i = 0
for row in serie_a_table_tm:
# Now we will receive all the cells in the table with their values
statistics = self.readRowTm(player_name, row)
if statistics[0] <= up_to_matchday:
df_tm.loc[i] = statistics
i += 1
# Printing our gathered data
#print(df_tm)
# ------------------------------------------------------------------------#
#------------------- START SCRAPING FANTAGIAVENO -------------------------#
# ------------------------------------------------------------------------#
# In the response variable we will the download of the web page
response_fg = requests.get(self.url_fg, headers=headers)
"""
Now we will create a BeautifulSoup object from our response.
The 'html.parser' parameter tells BeautifulSoup which parser to use when creating our object;
a parser is software responsible for converting an input into a data structure.
"""
page_bs_fg = BeautifulSoup(response_fg.content, 'html.parser')
# extract all table
tables_fg = page_bs_fg.find_all("table", class_="Border")
# Finding the right table that we want to observe and extracting all rows of the table
right_table_fg = 7
result_table_fg = tables_fg[right_table_fg].find("tbody").find_all("tr")
# Creating a DataFrame with our data
df_fg = pd.DataFrame(columns=self.table_structure_fg.keys())
i = 0
for row in result_table_fg:
# Now we will receive all the cells in the table with their values
statistics = self.readRowFg(row)
if statistics[0] <= up_to_matchday:
df_fg.loc[i] = statistics
i += 1
# Printing our gathered data
# print(df_fg)
# ------------------------------------------------------------------------#
#------------------------- MERGE AND EDIT DATA ---------------------------#
# ------------------------------------------------------------------------#
df_result = | pd.merge(df_tm, df_fg, on='matchday', how='inner') | pandas.merge |
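# --- Hedged example (not part of the original scraper) ------------------------
# A minimal sketch of the inner merge on `matchday` performed above, using toy
# frames in place of the scraped Transfermarkt / Fantagiaveno tables.
import pandas as pd

df_tm = pd.DataFrame({"matchday": [1, 2, 3], "goal": [0, 1, 0]})
df_fg = pd.DataFrame({"matchday": [1, 2, 4], "grade": [6.0, 7.5, 6.5]})

df_result = pd.merge(df_tm, df_fg, on="matchday", how="inner")
print(df_result)  # only matchdays 1 and 2 survive the inner join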
import gilda
import pandas as pd
from pathlib import Path
import pystow
import tqdm
from typing import Union
from indra_cogex.sources.processor import Processor
from indra_cogex.representation import Node, Relation
class ClinicaltrialsProcessor(Processor):
name = "clinicaltrials"
def __init__(self, path: Union[str, Path, None] = None):
default_path = pystow.join(
"indra",
"cogex",
"clinicaltrials",
name="clinical_trials.csv.gz",
)
if not path:
path = default_path
elif isinstance(path, str):
path = Path(path)
self.df = | pd.read_csv(path, sep=",", skiprows=10) | pandas.read_csv |
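# --- Hedged example (not part of the original processor) ----------------------
# A small sketch of the read_csv call completed above: the export is assumed to
# carry a preamble before the header row, hence the skiprows argument. The
# inline CSV text and the number of skipped rows are assumptions for
# illustration only.
import io
import pandas as pd

raw = "preamble line 1\npreamble line 2\nnct_id,brief_title\nNCT0001,Example trial\n"
df = pd.read_csv(io.StringIO(raw), sep=",", skiprows=2)
print(df.columns.tolist())  # ['nct_id', 'brief_title']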
import scanpy as sc
import numpy as np
import matplotlib.pyplot as plt
from .utils import metagene_loadings
import pandas as pd
import seaborn as sns
def ordered_matrixplot(d, n_genes=5, groups=None, **kwargs):
"""
matrix plot of ranked groups, with columns ordered by score instead of abs(score).
Separates up- from down-regulated genes better, resulting in visually-cleaner plots
:param d:
:param n_genes:
:param kwargs:
:return:
"""
top_genes = np.stack([np.array(list(x)) for x in d.uns['rank_genes_groups']['names']][:n_genes])
top_scores = np.stack([np.array(list(x)) for x in d.uns['rank_genes_groups']['scores']][:n_genes])
# order top genes by actual score, not absolute value
ordered_top_genes = np.take_along_axis(top_genes, np.argsort(-1 * top_scores, axis=0), axis=0)
# print(ordered_top_genes)
grouping_key = d.uns['rank_genes_groups']['params']['groupby']
group_names = list(d.uns['rank_genes_groups']['names'].dtype.fields.keys())
ordered_top_mapping = {group_names[i]: ordered_top_genes[:, i] for i in range(len(group_names))}
if groups is not None:
ordered_top_mapping = {k: v for k, v in ordered_top_mapping.items() if k in groups}
# print(ordered_top_mapping)
sc.pl.matrixplot(d, var_names=ordered_top_mapping, groupby=grouping_key, **kwargs)
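# --- Hedged usage sketch (not part of the original module) --------------------
# How `ordered_matrixplot` above might be called after ranking marker genes,
# wrapped in a helper so it does not run at import time. The example dataset
# and the 'bulk_labels' grouping key are assumptions for illustration.
def _example_ordered_matrixplot():
    adata = sc.datasets.pbmc68k_reduced()
    sc.tl.rank_genes_groups(adata, groupby="bulk_labels")
    ordered_matrixplot(adata, n_genes=3, cmap="RdBu_r")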
def plot_metagenes(data, comps=None, key='sca', **kwargs):
if comps is None:
if type(data) is dict:
comps = list(range(data['loadings'].shape[1]))
else:
comps = list(range(data.varm[key + '_loadings'].shape[1]))
fig, axs = plt.subplots(len(comps), 1)
loadings = metagene_loadings(data, key=key, **kwargs)
for i, comp in enumerate(comps):
df = | pd.DataFrame(loadings[comp]) | pandas.DataFrame |
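# --- Hedged example (not part of the original module) -------------------------
# A small sketch of the per-component step started above: the metagene loadings
# of one component are placed in a DataFrame and drawn as a bar plot. The dict
# layout ({'genes': [...], 'scores': [...]}) is an assumption about what
# `metagene_loadings` returns, made only for illustration.
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

component_loadings = {"genes": ["CD3D", "LYZ", "MS4A1"], "scores": [0.9, -0.4, 0.7]}
df = pd.DataFrame(component_loadings)
sns.barplot(data=df, x="genes", y="scores", color="steelblue")
plt.tight_layout()
plt.show()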
import pandas as pd
from ..utils._checks import _check_participants
from ..utils._docs import fill_doc
@fill_doc
def parse_thi(df, participants):
"""Parse the THI from multiple THI questionnaires and participants.
The input .csv file can be obtained by exporting from evamed
with the following settings:
-> Analysis
-> Select group
-> Export
-> CSV
-> Synthesis
-> [x] Export labels of choice
-> Select all 3 questionnaires
-> [x] Tinnitus Handicap Inventory (THI)
Parameters
----------
%(df_raw_evamed)s
%(participants)s
Returns
-------
%(df_clinical)s
"""
_check_participants(participants)
# clean-up columns
columns = [col for col in df.columns if "THI" in col]
assert len(columns) != 0, "THI not present in dataframe."
prefix = set(col.split("_")[0] for col in columns)
assert len(prefix) != 0 # sanity-check
thi_dict = dict(participant=[], prefix=[], date=[], result=[])
for idx in participants:
for pre in prefix:
thi_dict["participant"].append(idx)
thi_dict["prefix"].append(pre)
date = df.loc[df["patient_code"] == idx, f"{pre}_date"].values[0]
thi_dict["date"].append(date)
result = df.loc[df["patient_code"] == idx, f"{pre}_THI_R"].values[
0
]
thi_dict["result"].append(result)
thi = pd.DataFrame.from_dict(thi_dict)
thi.date = pd.to_datetime(thi.date)
# rename
mapper = {
"THIB": "Baseline",
"THIPREA": "Pre-assessment",
"THI": "Post-assessment",
}
thi["prefix"].replace(to_replace=mapper, inplace=True)
thi.rename(columns=dict(prefix="visit"), inplace=True)
return thi
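# Hedged usage sketch: the export path and participant ids below are hypothetical,
# used only to illustrate the expected call pattern; the csv must be a raw evamed
# export containing 'patient_code' and the THI columns.
def _demo_parse_thi():
    df = pd.read_csv("evamed_export.csv")            # hypothetical path
    return parse_thi(df, participants=[60, 61, 62])  # hypothetical participant ids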
@fill_doc
def parse_stai(df, participants):
"""Parse the STAI from multiple STAI questionnaires and participants.
The input .csv file can be obtained by exporting from evanmed
with the following settings:
-> Analysis
-> Select group
-> Export
-> CSV
-> Synthesis
        -> [x] Export labels of choice
-> Select all 2 questionnaires
-> [x] State and Trait Anxiety Inventory (STAI)
Parameters
----------
%(df_raw_evamed)s
%(participants)s
Returns
-------
%(df_clinical)s
"""
_check_participants(participants)
# clean-up columns
columns = [col for col in df.columns if "STAI" in col]
assert len(columns) != 0, "STAI not present in dataframe."
prefix = set(col.split("_")[0] for col in columns)
assert len(prefix) != 0 # sanity-check
stai_dict = dict(participant=[], prefix=[], date=[], result=[])
for idx in participants:
for pre in prefix:
stai_dict["participant"].append(idx)
stai_dict["prefix"].append(pre)
date = df.loc[df["patient_code"] == idx, f"{pre}_date"].values[0]
stai_dict["date"].append(date)
result = df.loc[df["patient_code"] == idx, f"{pre}_STAI_R"].values[
0
]
stai_dict["result"].append(result)
stai = pd.DataFrame.from_dict(stai_dict)
stai.date = pd.to_datetime(stai.date)
# rename
mapper = {"STAIB": "Baseline", "STAI": "Post-assessment"}
stai["prefix"].replace(to_replace=mapper, inplace=True)
stai.rename(columns=dict(prefix="visit"), inplace=True)
return stai
@fill_doc
def parse_bdi(df, participants):
"""Parse the BDI from multiple BDI questionnaires and participants.
    The input .csv file can be obtained by exporting from evamed
with the following settings:
-> Analysis
-> Select group
-> Export
-> CSV
-> Synthesis
        -> [x] Export labels of choice
-> Select all 2 questionnaires
-> [x] Beck's Depression Inventory (BDI)
Parameters
----------
%(df_raw_evamed)s
%(participants)s
Returns
-------
%(df_clinical)s
"""
_check_participants(participants)
# clean-up columns
columns = [col for col in df.columns if "BDI" in col]
assert len(columns) != 0, "BDI not present in dataframe."
prefix = set(col.split("_")[0] for col in columns)
assert len(prefix) != 0 # sanity-check
bdi_dict = dict(participant=[], prefix=[], date=[], result=[])
for idx in participants:
for pre in prefix:
bdi_dict["participant"].append(idx)
bdi_dict["prefix"].append(pre)
date = df.loc[df["patient_code"] == idx, f"{pre}_date"].values[0]
bdi_dict["date"].append(date)
result = df.loc[df["patient_code"] == idx, f"{pre}_BDI_R"].values[
0
]
bdi_dict["result"].append(result)
bdi = | pd.DataFrame.from_dict(bdi_dict) | pandas.DataFrame.from_dict |
import functools
import pendulum
import pandas as pd
from elasticsearch import Elasticsearch
from retrying import retry
import redis
import arrow
import json
from futu import OpenQuoteContext
from functools import reduce
from pandasql import sqldf
import os
# global systemSignal
# exit code 0 means /.dockerenv exists, i.e. the script is running inside docker
systemSignal = os.system('ls /.dockerenv')
# @retry()
def get_es_group_stacks(codes=['US.APLE',
'US.TSLA',
'US.GOOG',
'US.FB',
'US.NVDA',
'US.AMD',
'US.AMZN',
'US.BABA'],
dim='kline_day',
                        startTS=pendulum.now().add(years=-5).timestamp(),  # defaults to the last 5 years of data
endTS=pendulum.now().timestamp()):
es = Elasticsearch(['192.168.80.183'], http_auth=('elastic', '<PASSWORD>'), port=9200)
def serialize(hit):
fields = hit["fields"]
for k in fields:
fields[k] = fields[k][0]
return fields
@retry(stop_max_attempt_number=5)
def get_es_stock(code='US.APLE',
dim='kline_day',
                     startTS=pendulum.now().add(years=-5).timestamp(),  # defaults to the last 5 years of data
endTS=pendulum.now().timestamp()):
sql = {
"query":
{"bool": {"must": [{"bool": {"must": [{"term": {"code": {"value": code, "boost": 1}}},
{"range": {"time_key": {"from": int(startTS), "to": None,
"include_lower": False,
"include_upper": False, "boost": 1}}}],
"adjust_pure_negative": True, "boost": 1}},
{"range": {"time_key": {"from": None, "to": int(endTS), "include_lower": False,
"include_upper": False, "boost": 1}}}],
"adjust_pure_negative": True, "boost": 1}},
"_source": False,
"stored_fields": "_none_",
"docvalue_fields": ["close", "code", "create_time", "high", "low", "open", "pe", "spider_time", "time_key",
"turnover", "turnover_rate", "volume"],
"sort": [{"_doc": {"order": "asc"}}]}
res = es.search(index=dim, doc_type=dim, body=sql, size=100000)
global df, fdf
try:
# res = es.search(index=dim, doc_type=dim, body=sql, size=10000)
df = pd.DataFrame(list(map(serialize, res["hits"]["hits"]))).sort_values(by='time_key', ascending=True)
# df.set_index(['create_time'], inplace=True, )
# df.index = pd.DatetimeIndex(df.index)
            # TODO: switch to forward-adjusted prices (前复权)
adjfactor_sql = {
"query": {"term": {"code": {"value": code, "boost": 1}}},
"_source": False,
"stored_fields": "_none_",
"docvalue_fields": ["ex_div_date", "x", "y"],
"sort": [{"_doc": {"order": "asc"}}]
}
if systemSignal == 0:
join_sql = "select * from (select * from df left join fdf where df.create_time < fdf.ex_div_date order by fdf.ex_div_date desc) group by create_time order by create_time ;"
else:
join_sql = "select * from (select * from df left join fdf where df.create_time < fdf.ex_div_date order by fdf.ex_div_date asc) group by create_time order by create_time ;"
# join_sql = "select * from (select * from df left join fdf where df.create_time < fdf.ex_div_date order by fdf.ex_div_date desc) group by create_time order by create_time ;"
res_adj = es.search(index='adjustments_a', doc_type='adjustments', body=adjfactor_sql, size=100000)
hits_adj = res_adj["hits"]["hits"]
if len(hits_adj) > 0:
fdf = pd.DataFrame(list(map(serialize, hits_adj))).sort_values(by='ex_div_date', ascending=True)
# fdf['ex_div_date'] = fdf['ex_div_date'].apply(lambda T: time.strftime("%Y-%m-%d", time.strptime(T, "%Y-%m-%dT%H:%M:%S.000Z")))
pysqldf = lambda q: sqldf(q, globals())
                # online docker environment: ex_div_date sorted descending (5,4,3,2,1)
qfq_df = pysqldf(join_sql)
for i in ['close', 'open', 'low', 'high']:
qfq_df[f'{i}'] = qfq_df[f'{i}'] * qfq_df['x'] + qfq_df['y']
qfq_df[f'{i}'] = qfq_df[f'{i}'].apply(float)
# qfq_df.set_index(['create_time'], inplace=True, )
# qfq_df.index = pd.DatetimeIndex(qfq_df.index)
else:
qfq_df = df
qfq_df.set_index(['create_time'], inplace=True, )
qfq_df.index = pd.DatetimeIndex(qfq_df.index)
except Exception as e:
qfq_df = | pd.DataFrame() | pandas.DataFrame |
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVR
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import accuracy_score
from keras.callbacks import EarlyStopping
from keras.layers import LSTM
from keras.layers import Conv2D, MaxPooling2D, TimeDistributed
from keras.layers import Dense, Dropout
from keras.layers import Flatten, Reshape
from keras.layers import Embedding, Input
from keras.models import Sequential
from keras.models import load_model
from keras import optimizers
from keras.regularizers import L1L2
from keras.preprocessing.sequence import TimeseriesGenerator
import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras.utils import to_categorical
import numpy as np
import math
import pandas as pd
import os
import gc
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.stats.outliers_influence import variance_inflation_factor
scaler = RobustScaler()
def outliers_iqr(data):
data = data.astype('float32')
mean = data.mean()
q1, q3 = np.percentile(data, [25,75])
iqr = q3-q1
lower_bound = q1-(iqr*1.5)
upper_bound = q3+(iqr*1.5)
return np.where((data>upper_bound) | (data<lower_bound), mean, data)
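# Minimal sketch (toy data, hypothetical helper name) of what outliers_iqr does:
# values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are replaced by the column mean,
# everything else passes through unchanged.
def _demo_iqr():
    toy = np.array([1., 2., 2., 3., 2., 100.])   # 100. is an obvious outlier
    cleaned = outliers_iqr(toy)
    # 100. is replaced by toy.mean() (~18.33); the small values are untouched
    return cleaned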
def outliers_z_score(data, threshold=3):
data = data.astype('float32')
mean = data.mean()
std = data.std()
z_scores = [(y-mean)/std for y in data]
print(data, mean)
return np.where(np.abs(z_scores)>threshold, mean, data)
def remove_outlier(data):
input_data = data.columns
print(input_data)
for cols in data:
if cols!='PM10' and cols!='locale':
data[cols] = outliers_iqr(data[cols])
return data
def create_dataset(signal_data, look_back=4):
dataX, dataY = [], []
for i in range(len(signal_data)-look_back):
dataX.append(signal_data[i:(i+look_back), 0])
dataY.append(signal_data[i + look_back, 0])
return np.array(dataX), np.array(dataY)
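# Minimal sketch (hypothetical helper, toy data) of the sliding windows that
# create_dataset builds: with look_back=4, each row of dataX holds 4 consecutive
# values and the matching dataY entry is the value that immediately follows them.
def _demo_create_dataset():
    signal = np.arange(10, dtype='float32').reshape(-1, 1)   # single-feature column
    dataX, dataY = create_dataset(signal, look_back=4)
    # dataX.shape == (6, 4), dataY.shape == (6,)
    # e.g. dataX[0] == [0., 1., 2., 3.] and dataY[0] == 4.
    return dataX, dataY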
def ml_linear_regression(X_train, X_test, Y_train, Y_test):
#del [X_train['locale'], X_test['locale']]
del [X_train['date'], X_test['date']]
X_train = X_train.astype(float)
X_test = X_test.astype(float)
del [X_train['PM10'], X_test['PM10']]
gc.collect()
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
print(X_train)
sgd=optimizers.SGD(lr=0.003)
model=Sequential()
model.add(Dense(1, input_shape=(11,), kernel_initializer='normal', activation='linear'))
model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, Y_train, batch_size=1000, epochs=5000, verbose=1)
loss = history.history['loss']
x_epochs = range(1, len(loss) + 1)
plt.plot(x_epochs, loss, 'b', label='Training loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_test, Y_test, batch_size=1000)
print(score)
def ml_logistic_regression(X_train, X_test, Y_train, Y_test):
#del [X_train['locale'], X_test['locale']]
del [X_train['date'], X_test['date']]
X_train = X_train.astype(float)
X_test = X_test.astype(float)
del [X_train['PM10'], X_test['PM10']]
gc.collect()
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
Y_train = to_categorical(Y_train)
Y_test = to_categorical(Y_test)
sgd=optimizers.SGD(lr=0.003, momentum=0.0, decay=0.0, nesterov=True)
model=Sequential()
model.add(Dense(4, input_shape=(11,), activation='softmax'))
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, Y_train, batch_size=1000, epochs=5000, verbose=1)
loss = history.history['loss']
x_epochs = range(1, len(loss) + 1)
plt.plot(x_epochs, loss, 'b', label='Training loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_test, Y_test, batch_size=1000)
print(score)
def dl_DNN(X_train, X_test, Y_train, Y_test):
#del [X_train['locale'], X_test['locale']]
del [X_train['date'], X_test['date']]
X_train = X_train.astype(float)
X_test = X_test.astype(float)
del [X_train['PM10'], X_test['PM10']]
gc.collect()
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
Y_train = to_categorical(Y_train)
Y_test = to_categorical(Y_test)
adam=optimizers.Adam(lr=0.007)
model = Sequential()
model.add(Dense(12, input_dim=11, kernel_initializer='glorot_normal', bias_initializer='glorot_normal',
activation='relu', name='H1'))
model.add(Dropout(0.1))
model.add(Dense(10, kernel_initializer='glorot_normal', bias_initializer='glorot_normal',
activation='relu', name='H2'))
model.add(Dropout(0.1))
model.add(Dense(8, kernel_initializer='glorot_normal', bias_initializer='glorot_normal',
activation='relu', name='H3'))
model.add(Dropout(0.1))
model.add(Dense(4, activation='softmax'))
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, Y_train, batch_size=100, epochs=250, verbose=1)
loss = history.history['loss']
x_epochs = range(1, len(loss) + 1)
plt.plot(x_epochs, loss, 'b', label='Training loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_test, Y_test, batch_size=1000)
print(score)
def dl_LSTM(X_train, X_test):
ts = 4
n_features = 11
locale_list = list(X_train['locale'].value_counts().keys())
train_list = []
test_list = []
train_target = []
test_target = []
print(locale_list)
print(X_train)
Xs_train = X_train
Xs_test = X_test
for locale in locale_list:
X_train = Xs_train[Xs_train['locale']==locale]
X_test = Xs_test[Xs_test['locale']==locale]
X_train = X_train.sort_values(['date'])
X_test = X_test.sort_values(['date'])
Y_train = X_train['PM10'].astype(int)
Y_test = X_test['PM10'].astype(int)
del [X_train['locale'], X_test['locale']]
del [X_train['date'], X_test['date']]
del [X_train['PM10'], X_test['PM10']]
X_train = X_train.astype(float)
X_test = X_test.astype(float)
column_list = list(X_train)
for s in range(1, ts):
tmp_train = X_train[column_list].shift(s)
tmp_test = X_test[column_list].shift(s)
tmp_train.columns = "shift_" + tmp_train.columns + "_" + str(s)
tmp_test.columns = "shift_" + tmp_test.columns + "_" + str(s)
X_train[tmp_train.columns] = X_train[column_list].shift(s)
X_test[tmp_test.columns] = X_test[column_list].shift(s)
X_train = X_train[ts-1:]
X_test = X_test[ts-1:]
Y_train = Y_train[ts-1:]
Y_test = Y_test[ts-1:]
train_list.append(X_train)
test_list.append(X_test)
train_target.append(Y_train)
test_target.append(Y_test)
X_train = pd.concat(train_list)
X_test = pd.concat(test_list)
Y_train = pd.concat(train_target)
Y_test = pd.concat(test_target)
print(Y_train.value_counts())
print(Y_test.value_counts())
gc.collect()
print(X_train)
print(Y_train)
print(Y_train.shape)
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
Y_train = to_categorical(Y_train, 4)
Y_test = to_categorical(Y_test, 4)
X_train = np.reshape(X_train, X_train.shape+(1,))
X_test = np.reshape(X_test, X_test.shape+(1,))
X_train = X_train.reshape(-1,n_features,ts)
X_test = X_test.reshape(-1,n_features,ts)
print(X_train.shape)
print(X_test.shape)
# LSTM
lstm_output_size = 64
# batch_size
batch_size = 32
optimizer=optimizers.Adam(lr=0.0005)
checkpoint_filepath = os.path.join('model', 'fresh_models', '{0}_LSTM.{1}-{2}.h5'.format('model', '{epoch:02d}', '{val_loss:.7f}'))
checkpoint_callback = ModelCheckpoint(checkpoint_filepath, save_best_only=True, verbose=1)
early_stopping_callback = EarlyStopping(monitor="val_loss", patience=10, verbose=1)
callbacks=[checkpoint_callback, early_stopping_callback]
model=Sequential()
model.add(LSTM(64, input_shape=(n_features, ts), activation='relu', dropout=0.3))
model.add(Dense(4, activation='softmax'))
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, Y_train, batch_size=batch_size,
validation_data=(X_test, Y_test), epochs=20)
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
x_epochs = range(1, len(loss) + 1)
plt.plot(x_epochs, loss, 'b', label='Training loss')
plt.plot(x_epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.show()
plt.plot(x_epochs, acc, 'b', label='Training acc')
plt.plot(x_epochs, val_acc, 'r', label='Validation acc')
plt.title('Accuracy')
plt.legend()
plt.show()
score = model.evaluate(X_test, Y_test, batch_size=batch_size)
print(score)
Y_pred = model.predict(X_test)
Y_pred = Y_pred.argmax(axis=1)
Y_test = Y_test.argmax(axis=1)
print(accuracy_score(Y_test, Y_pred))
return score
def dl_StackedLSTM(X_train, X_test):
ts = 4
n_features = 11
locale_list = list(X_train['locale'].value_counts().keys())
train_list = []
test_list = []
train_target = []
test_target = []
print(locale_list)
print(X_train)
Xs_train = X_train
Xs_test = X_test
for locale in locale_list:
X_train = Xs_train[Xs_train['locale']==locale]
X_test = Xs_test[Xs_test['locale']==locale]
X_train = X_train.sort_values(['date'])
X_test = X_test.sort_values(['date'])
Y_train = X_train['PM10'].astype(int)
Y_test = X_test['PM10'].astype(int)
del [X_train['locale'], X_test['locale']]
del [X_train['date'], X_test['date']]
del [X_train['PM10'], X_test['PM10']]
X_train = X_train.astype(float)
X_test = X_test.astype(float)
column_list = list(X_train)
for s in range(1, ts):
tmp_train = X_train[column_list].shift(s)
tmp_test = X_test[column_list].shift(s)
tmp_train.columns = "shift_" + tmp_train.columns + "_" + str(s)
tmp_test.columns = "shift_" + tmp_test.columns + "_" + str(s)
X_train[tmp_train.columns] = X_train[column_list].shift(s)
X_test[tmp_test.columns] = X_test[column_list].shift(s)
X_train = X_train[ts-1:]
X_test = X_test[ts-1:]
Y_train = Y_train[ts-1:]
Y_test = Y_test[ts-1:]
train_list.append(X_train)
test_list.append(X_test)
train_target.append(Y_train)
test_target.append(Y_test)
X_train = pd.concat(train_list)
X_test = pd.concat(test_list)
Y_train = | pd.concat(train_target) | pandas.concat |
import logging
from collections import OrderedDict
import pandas as pd
import numpy as np
import pyprind
import six
import dask
from dask import delayed
from dask.diagnostics import ProgressBar
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
from py_stringmatching.tokenizer.whitespace_tokenizer import WhitespaceTokenizer
import cloudpickle as cp
import pickle
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.blocker.blocker import Blocker
import py_stringsimjoin as ssj
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, \
add_key_column
from py_entitymatching.utils.generic_helper import parse_conjunct
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
class DaskRuleBasedBlocker(Blocker):
"""
WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Blocks based on a sequence of blocking rules supplied by the user.
"""
def __init__(self, *args, **kwargs):
feature_table = kwargs.pop('feature_table', None)
self.feature_table = feature_table
self.rules = OrderedDict()
self.rule_str = OrderedDict()
self.rule_ft = OrderedDict()
self.filterable_sim_fns = {'jaccard', 'cosine', 'dice', 'overlap_coeff'}
self.allowed_ops = {'<', '<='}
self.rule_source = OrderedDict()
self.rule_cnt = 0
logger.warning("WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN "
"RISK.")
super(Blocker, self).__init__(*args, **kwargs)
def _create_rule(self, conjunct_list, feature_table, rule_name):
if rule_name is None:
# set the rule name automatically
name = '_rule_' + str(self.rule_cnt)
self.rule_cnt += 1
else:
# use the rule name supplied by the user
name = rule_name
# create function string
fn_str = 'def ' + name + '(ltuple, rtuple):\n'
# add 4 tabs
fn_str += ' '
fn_str += 'return ' + ' and '.join(conjunct_list)
if feature_table is not None:
feat_dict = dict(
zip(feature_table['feature_name'], feature_table['function']))
else:
feat_dict = dict(zip(self.feature_table['feature_name'],
self.feature_table['function']))
six.exec_(fn_str, feat_dict)
return feat_dict[name], name, fn_str
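    # Illustration (hypothetical rule, mirroring the docstring examples below): for
    # conjunct_list = ['name_name_lev(ltuple, rtuple) > 3'] and rule_name='rule_1',
    # the generated source held in fn_str is roughly
    #
    #     def rule_1(ltuple, rtuple):
    #         return name_name_lev(ltuple, rtuple) > 3
    #
    # six.exec_ compiles it against the feature functions in feat_dict, and the
    # resulting callable is what add_rule stores in self.rules.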
def add_rule(self, conjunct_list, feature_table=None, rule_name=None):
"""Adds a rule to the rule-based blocker.
Args:
conjunct_list (list): A list of conjuncts specifying the rule.
feature_table (DataFrame): A DataFrame containing all the
features that are being referenced by
the rule (defaults to None). If the
feature_table is not supplied here,
then it must have been specified
during the creation of the rule-based
blocker or using set_feature_table
function. Otherwise an AssertionError
will be raised and the rule will not
be added to the rule-based blocker.
rule_name (string): A string specifying the name of the rule to
be added (defaults to None). If the
rule_name is not specified then a name will
be automatically chosen. If there is already
a rule with the specified rule_name, then
an AssertionError will be raised and the
rule will not be added to the rule-based
blocker.
Returns:
The name of the rule added (string).
Raises:
AssertionError: If `rule_name` already exists.
AssertionError: If `feature_table` is not a valid value
parameter.
Examples:
>>> import py_entitymatching
>>> from py_entitymatching.dask.dask_rule_based_blocker import DaskRuleBasedBlocker
>>> rb = DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, rule_name='rule1')
"""
if rule_name is not None and rule_name in self.rules.keys():
logger.error('A rule with the specified rule_name already exists.')
raise AssertionError('A rule with the specified rule_name already exists.')
if feature_table is None and self.feature_table is None:
logger.error('Either feature table should be given as parameter ' +
'or use set_feature_table to set the feature table.')
raise AssertionError('Either feature table should be given as ' +
'parameter or use set_feature_table to set ' +
'the feature table.')
if not isinstance(conjunct_list, list):
conjunct_list = [conjunct_list]
fn, name, fn_str = self._create_rule(conjunct_list, feature_table, rule_name)
self.rules[name] = fn
self.rule_source[name] = fn_str
self.rule_str[name] = conjunct_list
if feature_table is not None:
self.rule_ft[name] = feature_table
else:
self.rule_ft[name] = self.feature_table
return name
def delete_rule(self, rule_name):
"""Deletes a rule from the rule-based blocker.
Args:
rule_name (string): Name of the rule to be deleted.
Examples:
>>> import py_entitymatching as em
>>> from py_entitymatching.dask.dask_rule_based_blocker import DaskRuleBasedBlocker
>>> rb = DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, block_f, rule_name='rule_1')
>>> rb.delete_rule('rule_1')
"""
assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'
del self.rules[rule_name]
del self.rule_source[rule_name]
del self.rule_str[rule_name]
del self.rule_ft[rule_name]
return True
def view_rule(self, rule_name):
"""Prints the source code of the function corresponding to a rule.
Args:
rule_name (string): Name of the rule to be viewed.
Examples:
>>> import py_entitymatching as em
>>> rb = em.DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, block_f, rule_name='rule_1')
>>> rb.view_rule('rule_1')
"""
assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'
print(self.rule_source[rule_name])
def get_rule_names(self):
"""Returns the names of all the rules in the rule-based blocker.
Returns:
A list of names of all the rules in the rule-based blocker (list).
Examples:
>>> import py_entitymatching as em
>>> rb = em.DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, block_f, rule_name='rule_1')
>>> rb.get_rule_names()
"""
return self.rules.keys()
def get_rule(self, rule_name):
"""Returns the function corresponding to a rule.
Args:
rule_name (string): Name of the rule.
Returns:
A function object corresponding to the specified rule.
Examples:
>>> import py_entitymatching as em
>>> rb = em.DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, feature_table=block_f, rule_name='rule_1')
>>> rb.get_rule()
"""
assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'
return self.rules[rule_name]
def set_feature_table(self, feature_table):
"""Sets feature table for the rule-based blocker.
Args:
feature_table (DataFrame): A DataFrame containing features.
Examples:
>>> import py_entitymatching as em
>>> rb = em.DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rb.set_feature_table(block_f)
"""
if self.feature_table is not None:
logger.warning(
'Feature table is already set, changing it now will not recompile '
'existing rules')
self.feature_table = feature_table
def block_tables(self, ltable, rtable, l_output_attrs=None,
r_output_attrs=None,
l_output_prefix='ltable_', r_output_prefix='rtable_',
verbose=False, show_progress=True, n_ltable_chunks=1,
n_rtable_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK
Blocks two tables based on the sequence of rules supplied by the user.
Finds tuple pairs from left and right tables that survive the sequence
of blocking rules. A tuple pair survives the sequence of blocking rules
if none of the rules in the sequence returns True for that pair. If any
of the rules returns True, then the pair is blocked.
Args:
ltable (DataFrame): The left input table.
rtable (DataFrame): The right input table.
l_output_attrs (list): A list of attribute names from the left
table to be included in the
output candidate set (defaults to None).
r_output_attrs (list): A list of attribute names from the right
table to be included in the
output candidate set (defaults to None).
l_output_prefix (string): The prefix to be used for the attribute names
coming from the left table in the output
candidate set (defaults to 'ltable\_').
r_output_prefix (string): The prefix to be used for the attribute names
coming from the right table in the output
candidate set (defaults to 'rtable\_').
verbose (boolean): A flag to indicate whether the debug
information should be logged (defaults to False).
show_progress (boolean): A flag to indicate whether progress should
be displayed to the user (defaults to True).
n_ltable_chunks (int): The number of partitions to split the left table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
n_rtable_chunks (int): The number of partitions to split the right table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
Returns:
A candidate set of tuple pairs that survived the sequence of
blocking rules (DataFrame).
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_output_attrs` is not of type of
list.
AssertionError: If `r_output_attrs` is not of type of
list.
AssertionError: If the values in `l_output_attrs` is not of type
string.
AssertionError: If the values in `r_output_attrs` is not of type
string.
AssertionError: If the input `l_output_prefix` is not of type
string.
AssertionError: If the input `r_output_prefix` is not of type
string.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `show_progress` is not of type
boolean.
AssertionError: If `n_ltable_chunks` is not of type
int.
AssertionError: If `n_rtable_chunks` is not of type
int.
AssertionError: If `l_out_attrs` are not in the ltable.
AssertionError: If `r_out_attrs` are not in the rtable.
AssertionError: If there are no rules to apply.
Examples:
>>> import py_entitymatching as em
>>> from py_entitymatching.dask.dask_rule_based_blocker import DaskRuleBasedBlocker
>>> rb = DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, feature_table=block_f)
>>> C = rb.block_tables(A, B)
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")
# validate data types of input parameters
self.validate_types_params_tables(ltable, rtable,
l_output_attrs, r_output_attrs,
l_output_prefix,
r_output_prefix, verbose, 1)
# validate data type of show_progress
self.validate_show_progress(show_progress)
# validate input parameters
self.validate_output_attrs(ltable, rtable, l_output_attrs,
r_output_attrs)
# get and validate metadata
log_info(logger, 'Required metadata: ltable key, rtable key', verbose)
# # get metadata
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger,
verbose)
# # validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger,
verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger,
verbose)
# validate rules
assert len(self.rules.keys()) > 0, 'There are no rules to apply'
# validate number of ltable and rtable chunks
validate_object_type(n_ltable_chunks, int, 'Parameter n_ltable_chunks')
validate_object_type(n_rtable_chunks, int, 'Parameter n_rtable_chunks')
validate_chunks(n_ltable_chunks)
validate_chunks(n_rtable_chunks)
# # determine the number of chunks
n_ltable_chunks = get_num_partitions(n_ltable_chunks, len(ltable))
n_rtable_chunks = get_num_partitions(n_rtable_chunks, len(rtable))
# # set index for convenience
l_df = ltable.set_index(l_key, drop=False)
r_df = rtable.set_index(r_key, drop=False)
# # remove l_key from l_output_attrs and r_key from r_output_attrs
l_output_attrs_1 = []
if l_output_attrs:
l_output_attrs_1 = [x for x in l_output_attrs if x != l_key]
r_output_attrs_1 = []
if r_output_attrs:
r_output_attrs_1 = [x for x in r_output_attrs if x != r_key]
# # get attributes to project
l_proj_attrs, r_proj_attrs = self.get_attrs_to_project(l_key, r_key,
l_output_attrs_1,
r_output_attrs_1)
l_df, r_df = l_df[l_proj_attrs], r_df[r_proj_attrs]
candset, rule_applied = self.block_tables_with_filters(l_df, r_df,
l_key, r_key,
l_output_attrs_1,
r_output_attrs_1,
l_output_prefix,
r_output_prefix,
verbose,
show_progress,
get_num_cores())
# pass number of splits as
# the number of cores in the machine
if candset is None:
# no filterable rule was applied
candset = self.block_tables_without_filters(l_df, r_df, l_key,
r_key, l_output_attrs_1,
r_output_attrs_1,
l_output_prefix,
r_output_prefix,
verbose, show_progress,
n_ltable_chunks, n_rtable_chunks)
elif len(self.rules) > 1:
# one filterable rule was applied but other rules are left
# block candset by applying other rules and excluding the applied rule
candset = self.block_candset_excluding_rule(candset, l_df, r_df,
l_key, r_key,
l_output_prefix + l_key,
r_output_prefix + r_key,
rule_applied,
show_progress, get_num_cores())
retain_cols = self.get_attrs_to_retain(l_key, r_key, l_output_attrs_1,
r_output_attrs_1,
l_output_prefix, r_output_prefix)
if len(candset) > 0:
candset = candset[retain_cols]
else:
candset = pd.DataFrame(columns=retain_cols)
# update catalog
key = get_name_for_key(candset.columns)
candset = add_key_column(candset, key)
cm.set_candset_properties(candset, key, l_output_prefix + l_key,
r_output_prefix + r_key, ltable, rtable)
# return candidate set
return candset
def block_candset_excluding_rule(self, c_df, l_df, r_df, l_key, r_key,
fk_ltable, fk_rtable, rule_to_exclude,
show_progress, n_chunks):
# # list to keep track of valid ids
valid = []
apply_rules_excluding_rule_pkl = cp.dumps(self.apply_rules_excluding_rule)
if n_chunks == 1:
# single process
valid = _block_candset_excluding_rule_split(c_df, l_df, r_df,
l_key, r_key,
fk_ltable, fk_rtable,
rule_to_exclude,
apply_rules_excluding_rule_pkl,
show_progress)
else:
# multiprocessing
            c_splits = np.array_split(c_df, n_chunks)
valid_splits = []
for i in range(len(c_splits)):
partial_result = delayed(_block_candset_excluding_rule_split)(c_splits[i],
l_df, r_df,
l_key, r_key,
fk_ltable,
fk_rtable,
rule_to_exclude,
apply_rules_excluding_rule_pkl, False)
# use Progressbar from
# Dask.diagnostics so set the
#show_progress to False
valid_splits.append(partial_result)
valid_splits = delayed(wrap)(valid_splits)
if show_progress:
with ProgressBar():
valid_splits = valid_splits.compute(scheduler="processes",
num_workers=get_num_cores())
else:
valid_splits = valid_splits.compute(scheduler="processes",
num_workers=get_num_cores())
valid = sum(valid_splits, [])
# construct output candset
if len(c_df) > 0:
candset = c_df[valid]
else:
candset = pd.DataFrame(columns=c_df.columns)
# return candidate set
return candset
def block_tables_without_filters(self, l_df, r_df, l_key, r_key,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
verbose, show_progress, n_ltable_chunks,
n_rtable_chunks):
# do blocking
# # determine the number of processes to launch parallely
candset = None
apply_rules_pkl = cp.dumps(self.apply_rules)
if n_ltable_chunks == 1 and n_rtable_chunks == 1:
# single process
candset = _block_tables_split(l_df, r_df, l_key, r_key,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
apply_rules_pkl, show_progress)
else:
# multiprocessing
# m, n = self.get_split_params(n_procs, len(l_df), len(r_df))
            l_splits = np.array_split(l_df, n_ltable_chunks)
            r_splits = np.array_split(r_df, n_rtable_chunks)
c_splits = []
for i in range(len(l_splits)):
for j in range(len(r_splits)):
partial_result = delayed(_block_tables_split)(l_splits[i], r_splits[j],
l_key, r_key,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
apply_rules_pkl, False) # we will use
# Dask.diagnostics to display the progress bar so set
# show_progress to False
c_splits.append(partial_result)
c_splits = delayed(wrap)(c_splits)
if show_progress:
with ProgressBar():
c_splits = c_splits.compute(scheduler="processes", num_workers = get_num_cores())
else:
c_splits = c_splits.compute(scheduler="processes", num_workers=get_num_cores())
candset = | pd.concat(c_splits, ignore_index=True) | pandas.concat |
# % ---------------------------------------------------------------------------------------------------------------------------------------
# % Hospitalization Models
# % ---------------------------------------------------------------------------------------------------------------------------------------
# % This code provides predictive model described in "Early prediction of level-of-care requirements in patients with COVID-19." - Elife(2020)
# %
# % Authors: Hao, Boran, <NAME>, <NAME>, <NAME>, <NAME>,
# % <NAME>, <NAME>, <NAME>, and <NAME>.
# %
# % ---------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Data
# ------------------------------------------------------------------------------
import lightgbm as lgb
from sklearn.metrics import classification_report, f1_score, roc_curve, auc, accuracy_score
import pylab as pl
import statsmodels.api as sm
import statsmodels.stats.proportion as smprop
from scipy import stats
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score, KFold
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn import preprocessing
import os
import math
import numpy as np
import pandas as pd
pd.options.display.max_columns = None
pd.options.display.max_rows = None
# Load Preprocessed Data
Y = pd.read_csv('Final_Y.csv')
X = pd.read_csv('Final_X.csv')
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
def stat_test(df, y):
name = pd.DataFrame(df.columns, columns=['Variable'])
df0 = df[y == 0]
df1 = df[y == 1]
pvalue = []
y_corr = []
for col in df.columns:
if df[col].nunique() == 2:
chi2stat, pval, Stat2chi = smprop.proportions_chisquare(
[df0[col].sum(), df1[col].sum()], [len(df0[col]), len(df1[col])], value=None)
pvalue.append(pval)
else:
pvalue.append(stats.ks_2samp(df0[col], df1[col]).pvalue)
y_corr.append(df[col].corr(y))
name['All_mean'] = df.mean().values
name['y1_mean'] = df1.mean().values
name['y0_mean'] = df0.mean().values
name['All_std'] = df.std().values
name['y1_std'] = df1.std().values
name['y0_std'] = df0.std().values
name['p-value'] = pvalue
name['y_corr'] = y_corr
# [['Variable','p-value','y_corr']]
return name.sort_values(by=['p-value'])
def high_corr(df, thres=0.8):
corr_matrix_raw = df.corr()
corr_matrix = corr_matrix_raw.abs()
high_corr_var_ = np.where(corr_matrix > thres)
high_corr_var = [(corr_matrix.index[x],
corr_matrix.columns[y],
corr_matrix_raw.iloc[x,
y]) for x,
y in zip(*high_corr_var_) if x != y and x < y]
return high_corr_var
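# Minimal sketch (toy data, hypothetical helper name) of high_corr: it returns
# (col_i, col_j, corr) triples for every pair whose absolute correlation exceeds
# `thres`, keeping only the upper triangle so each pair is reported once.
def _demo_high_corr():
    toy = pd.DataFrame({'a': [1., 2., 3., 4.],
                        'b': [2., 4., 6., 8.],    # perfectly correlated with 'a'
                        'c': [4., 1., 3., 2.]})
    return high_corr(toy, thres=0.8)              # -> [('a', 'b', 1.0)], up to rounding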
def df_fillna(df):
df_nullsum = df.isnull().sum()
for col in df_nullsum[df_nullsum > 0].index:
df[col + '_isnull'] = df[col].isnull()
df[col] = df[col].fillna(df[col].median())
return df
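# Toy sketch (hypothetical helper name) of df_fillna: every column with missing
# values gets a boolean "<col>_isnull" indicator, and the gaps are filled with
# that column's median.
def _demo_df_fillna():
    toy = pd.DataFrame({'age': [20., np.nan, 40.]})
    out = df_fillna(toy)
    # out['age'] == [20., 30., 40.] and out['age_isnull'] == [False, True, False]
    return out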
def df_drop(df_new, drop_cols):
return df_new.drop(df_new.columns[df_new.columns.isin(drop_cols)], axis=1)
def clf_F1(best_C_grid, best_F1, best_F1std, classifier, X_train,
y_train, C_grid, nFolds, silent=True, seed=2020):
# global best_C_grid,best_F1, best_F1std
results = cross_val_score(
classifier,
X_train,
y_train,
cv=StratifiedKFold(
n_splits=nFolds,
shuffle=True,
random_state=seed),
n_jobs=-1,
scoring='f1') # cross_validation.
F1, F1std = results.mean(), results.std()
if silent == False:
print(C_grid, F1, F1std)
if F1 > best_F1:
best_C_grid = C_grid
best_F1, best_F1std = F1, F1std
return best_C_grid, best_F1, best_F1std
def my_RFE(df_new, col_y='Hospitalization', my_range=range(
5, 60, 2), my_penalty='l1', my_C=0.01, cvFolds=5, step=1):
F1_all_rfe = []
Xraw = df_new.drop(col_y, axis=1).values
y = df_new[col_y].values
names = df_new.drop(col_y, axis=1).columns
for n_select in my_range:
X = Xraw
clf = LogisticRegression(
C=my_C,
penalty=my_penalty,
class_weight='balanced',
solver='liblinear') # tol=0.01,
rfe = RFE(clf, n_select, step=step)
rfe.fit(X, y.ravel())
X = df_new.drop(col_y, axis=1).drop(
names[rfe.ranking_ > 1], axis=1).values
best_F1, best_F1std = 0.1, 0
best_C_grid = 0
for C_grid in [0.01, 0.1, 1, 10, 100, 1000, 10000, 100000]:
clf = LogisticRegression(
C=C_grid,
class_weight='balanced',
solver='liblinear') # penalty=my_penalty,
best_C_grid, best_F1, best_F1std = clf_F1(
best_C_grid, best_F1, best_F1std, clf, X, y, C_grid, cvFolds)
F1_all_rfe.append((n_select, best_F1, best_F1std))
F1_all_rfe = pd.DataFrame(
F1_all_rfe, index=my_range, columns=[
'n_select', "best_F1", "best_F1std"])
F1_all_rfe['F1_'] = F1_all_rfe['best_F1'] - F1_all_rfe['best_F1std']
X = Xraw
clf = LogisticRegression(
C=my_C,
penalty=my_penalty,
class_weight='balanced',
solver='liblinear') # 0.
rfe = RFE(
clf, F1_all_rfe.loc[F1_all_rfe['F1_'].idxmax(), 'n_select'], step=step)
rfe.fit(X, y.ravel())
id_keep_1st = names[rfe.ranking_ == 1].values
return id_keep_1st, F1_all_rfe
def my_train(X_train, y_train, model='LR', penalty='l1', cv=5,
scoring='f1', class_weight='balanced', seed=2020):
if model == 'SVM':
svc = LinearSVC(
penalty=penalty,
class_weight=class_weight,
dual=False,
max_iter=10000)
parameters = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
gsearch = GridSearchCV(svc, parameters, cv=cv, scoring=scoring)
elif model == 'LGB':
param_grid = {
'num_leaves': range(2, 15, 2),
'n_estimators': [50, 100, 500, 1000],
'colsample_bytree': [0.1, 0.3, 0.7, 0.9]
}
lgb_estimator = lgb.LGBMClassifier(
boosting_type='gbdt',
objective='binary',
learning_rate=0.1,
random_state=seed) # eval_metric='auc' num_boost_round=2000,
gsearch = GridSearchCV(
estimator=lgb_estimator,
param_grid=param_grid,
cv=cv,
n_jobs=-1,
scoring=scoring)
elif model == 'RF':
rfc = RandomForestClassifier(
random_state=seed,
class_weight=class_weight,
n_jobs=-1)
param_grid = {
'max_features': [0.05, 0.1, 0.3, 0.5, 0.7, 1],
'n_estimators': [100, 500, 1000],
'max_depth': range(2, 10, 1)
}
gsearch = GridSearchCV(
estimator=rfc,
param_grid=param_grid,
cv=cv,
scoring=scoring)
else:
LR = LogisticRegression(
penalty=penalty,
class_weight=class_weight,
solver='liblinear',
random_state=seed)
parameters = {'C': [0.01, 0.1, 1, 10, 100, 1000, 10000, 100000]}
gsearch = GridSearchCV(LR, parameters, cv=cv, scoring=scoring)
gsearch.fit(X_train, y_train)
clf = gsearch.best_estimator_
if model == 'LGB' or model == 'RF':
print('Best parameters found by grid search are:', gsearch.best_params_)
print('train set accuracy:', gsearch.best_score_)
return clf
def cal_f1_scores(y, y_pred_score):
fpr, tpr, thresholds = roc_curve(y, y_pred_score)
thresholds = sorted(set(thresholds))
metrics_all = []
for thresh in thresholds:
y_pred = np.array((y_pred_score > thresh))
metrics_all.append(
(thresh, auc(
fpr, tpr), f1_score(
y, y_pred, average='micro'), f1_score(
y, y_pred, average='macro'), f1_score(
y, y_pred, average='weighted')))
metrics_df = pd.DataFrame(
metrics_all,
columns=[
'thresh',
'tr AUC',
'tr micro F1-score',
'tr macro F1-score',
'tr weighted F1-score'])
return metrics_df.sort_values(
by='tr weighted F1-score', ascending=False).head(1) # ['thresh'].values[0]
def cal_f1_scores_te(y, y_pred_score, thresh):
fpr, tpr, thresholds = roc_curve(y, y_pred_score)
y_pred = np.array((y_pred_score > thresh))
metrics_all = [
(thresh, auc(
fpr, tpr), f1_score(
y, y_pred, average='micro'), f1_score(
y, y_pred, average='macro'), f1_score(
y, y_pred, average='weighted'))]
metrics_df = pd.DataFrame(
metrics_all,
columns=[
'thresh',
'AUC',
'micro F1-score',
'macro F1-score',
'weighted F1-score'])
return metrics_df
def my_test(X_train, xtest, y_train, ytest, clf,
target_names, report=False, model='LR'):
if model == 'SVM':
ytrain_pred_score = clf.decision_function(X_train)
else:
ytrain_pred_score = clf.predict_proba(X_train)[:, 1]
metrics_tr = cal_f1_scores(y_train, ytrain_pred_score)
thres_opt = metrics_tr['thresh'].values[0]
# ytest_pred=clf.predict(xtest)
if model == 'SVM':
ytest_pred_score = clf.decision_function(xtest)
else:
ytest_pred_score = clf.predict_proba(xtest)[:, 1]
metrics_te = cal_f1_scores_te(ytest, ytest_pred_score, thres_opt)
return metrics_te.merge(metrics_tr, on='thresh')
def tr_predict(df_new, col_y, target_names=['0', '1'], model='LR', penalty='l1',
cv_folds=5, scoring='f1', test_size=0.2, report=False, RFE=False, pred_score=False):
# scaler = preprocessing.StandardScaler()#MinMaxScaler
y = df_new[col_y].values
metrics_all = []
if is_BWH:
my_seeds = range(2020, 2021)
else:
my_seeds = range(2040, 2045)
for seed in my_seeds:
X = df_new.drop([col_y], axis=1).values
name_cols = df_new.drop([col_y], axis=1).columns.values
if is_BWH:
X = pd.DataFrame(X)
y = pd.DataFrame(y)
X_train = X.loc[Train_Index, :]
xtest = X.loc[Test_Index, :]
y_train = y.loc[Train_Index, :]
ytest = y.loc[Test_Index, :]
else:
X_train, xtest, y_train, ytest = train_test_split(
X, y, stratify=y, test_size=test_size, random_state=seed)
if RFE:
df_train = pd.DataFrame(X_train, columns=name_cols)
df_train[col_y] = y_train
# my_penalty='l1', my_C = 1, my_range=range(25,46,5),
            # my_RFE runs its own F1-scored cross-validation internally (see clf_F1)
            # and takes no `scoring` argument
            id_keep_1st, F1_all_rfe = my_RFE(
                df_train, col_y=col_y, cvFolds=cv_folds)
print(F1_all_rfe)
X_train = df_train[id_keep_1st]
df_test = pd.DataFrame(xtest, columns=name_cols)
xtest = df_test[id_keep_1st]
name_cols = id_keep_1st
clf = my_train(
X_train,
y_train,
model=model,
penalty=penalty,
cv=cv_folds,
scoring=scoring,
class_weight='balanced',
seed=seed)
metrics_all.append(
my_test(
X_train,
xtest,
y_train,
ytest,
clf,
target_names,
report=report,
model=model))
metrics_df = pd.concat(metrics_all)
metrics_df = metrics_df[cols_rep].describe(
).T[['mean', 'std']].stack().to_frame().T
# refit using all samples to get non-biased coef.
clf.fit(X, y)
if pred_score:
if model == 'SVM':
y_pred_score = clf.decision_function(X)
else:
y_pred_score = clf.predict_proba(X)[:, 1]
df_new['y_pred_score'] = y_pred_score
if model == 'LGB' or model == 'RF':
df_coef_ = pd.DataFrame(list(zip(name_cols, np.round(
clf.feature_importances_, 2))), columns=['Variable', 'coef_'])
else:
df_coef_ = pd.DataFrame(
list(zip(name_cols, np.round(clf.coef_[0], 2))), columns=['Variable', 'coef_'])
df_coef_ = df_coef_.append({'Variable': 'intercept_', 'coef_': np.round(
clf.intercept_, 2)}, ignore_index=True)
df_coef_['coef_abs'] = df_coef_['coef_'].abs()
if pred_score: # ==True
return df_coef_.sort_values('coef_abs', ascending=False)[
['Variable', 'coef_']], metrics_df, df_new['y_pred_score'] # , scaler
else:
return df_coef_.sort_values('coef_abs', ascending=False)[
['Variable', 'coef_']], metrics_df
# ------------------------------------------------------------------------------
# Hospitalization Models
# ------------------------------------------------------------------------------
# Test split
is_BWH = 1 # 1 for BWH as test and 0 for random split
cols_rep = [
'AUC',
'micro F1-score',
'weighted F1-score',
'tr AUC',
'tr micro F1-score',
'tr weighted F1-score']
# Load hospital names
HospitalNames = pd.read_csv('hos_stat_latest.csv', index_col=0, header=None)
HospitalNames["PID"] = HospitalNames.index
HospitalNames = HospitalNames.reset_index(drop=True)
HospitalNames = HospitalNames.sort_values(by=['PID'])
HospitalNames = HospitalNames.reset_index(drop=True)
Train_Index = list(HospitalNames[HospitalNames[1] != 'BWH'].index)
Test_Index = list(HospitalNames[HospitalNames[1] == 'BWH'].index)
df = X
# Remove high correlated
print(high_corr(df, thres=0.8))
df = df.drop(columns=['Alcohol_No'])
# All features -----------------------------------------------------------
Data1_DF_All = | pd.concat([Y, df], axis=1) | pandas.concat |