import numpy as np
import pandas as pd
from osgeo import gdal
import geopandas as gpd
from shapely.geometry import Point
import shapely.ops as sops
# =============================================================================
# Module 1
# =============================================================================
def carrega_bho(file, rasterfn):
# Load the DEM to filter the area of interest (faster to load)
src = gdal.Open(rasterfn)
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
lrx = ulx + (src.RasterXSize * xres)
lry = uly + (src.RasterYSize * yres)
bbox = (ulx, lry, lrx, uly)
""" carrega arquivo bho e garante dtypes inteiros em string"""
gdf = gpd.read_file(file, bbox=bbox)
# crs = gdf.crs
# # convert what was read as int64 to object (we want strings!)
# d = {k:np.dtype('O') for (k,v) in gdf.dtypes.items() if v=='int64'}
# # fix the crs (.astype breaks it for some reason)
# gdf = gdf.astype(d)
# gdf.crs = crs
return gdf
def coords_in_bho(coords_list, df_bho_area):
ids = np.arange(len(coords_list))
geometry = [Point(xy) for xy in coords_list]
points = gpd.GeoDataFrame(ids, crs = df_bho_area.crs, geometry = geometry)
# apply the spatial join on the points layer (left index)
pontos_em_pols = gpd.sjoin(points, df_bho_area, how='left',op='within')
cobacias = pontos_em_pols['cobacia']
return cobacias
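# Illustrative usage sketch (not part of the original script): the file and raster
# paths below are hypothetical placeholders, and the coordinate pairs are only
# examples. It shows how the two helpers above are meant to be chained: load the BHO
# drainage-area polygons clipped to the DEM extent, then map point coordinates to
# their 'cobacia' codes.
def _example_coords_to_cobacia():
    bho_area_file = 'geoft_bho_area_drenagem.gpkg'  # assumed file name
    dem_file = 'dem.tif'                            # assumed file name
    df_bho_area = carrega_bho(bho_area_file, dem_file)
    coords = [(-51.2, -30.0), (-52.5, -29.3)]       # (lon, lat) examples
    return coords_in_bho(coords, df_bho_area)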
def roi_define(df_bho_area, df_bho_trecho, df_bho_ponto, cods, level=1, method='coord'):
if method=='coord':
lista_cobacias = upstream_bho(cods[0], df_bho_trecho)
lista_cobacias = sorted(lista_cobacias)
sub = [0] * len(lista_cobacias)
for i, codigo in enumerate(cods):
lista_i = upstream_bho(codigo, df_bho_trecho)
sub = np.where(np.isin(lista_cobacias, lista_i), i+1, sub)
# Filter the shapes to the area of interest
roi_df_area = df_bho_area[df_bho_area['cobacia'].isin(
lista_cobacias)].sort_values(by='cobacia').reset_index(drop=True)
roi_df_area['sub'] = sub
roi_df_trecho = df_bho_trecho[df_bho_trecho['cobacia'].isin(
lista_cobacias)].sort_values(by='cobacia').reset_index(drop=True)
roi_df_trecho['sub'] = sub
elif method=='nunivotto':
roi_df_area = df_bho_area[df_bho_area['nunivotto' + str(level)].isin(
cods)].sort_values(by='cobacia').reset_index(drop=True)
lista_cobacias = list(roi_df_area['cobacia'])
roi_df_trecho = df_bho_trecho[df_bho_trecho['cobacia'].isin(
lista_cobacias)].sort_values(by='cobacia').reset_index(drop=True)
# Extract the points inside the basin of interest
noorigem = list(roi_df_trecho['noorigem'])
nodestino = list(roi_df_trecho['nodestino'])
lista_pontos = list(set(noorigem + nodestino))
roi_df_ponto = (df_bho_ponto[df_bho_ponto['idponto'].isin(
lista_pontos)].sort_values(by='idponto').reset_index(drop=True))
return roi_df_area, roi_df_trecho, roi_df_ponto, lista_cobacias, lista_pontos
def upstream_bho(codigo, df_bho_trecho):
'''Starting from a code, walks upstream through the BHO-ANA reach network.
The starting code is also included in the returned list.
'''
# Prepare the feature class for processing
df_work = df_bho_trecho
# The upstream walk is done by cobacia.
# Start walking up the river
cobacia = [codigo]
next_cobacia = cobacia
lista_cobacias = [] # lista de subida global
sobe = 1
while sobe == 1:
# global upstream walk
cobacia = next_cobacia
lista_cobacias.extend(cobacia)
# current basin
bacia = df_work[df_work['cobacia'].isin(cobacia)]
# find the reach and basins immediately upstream
trecho = bacia['cotrecho']
montante = df_work[df_work['nutrjus'].isin(trecho)]
next_cobacia = montante['cobacia']
if len(next_cobacia) == 0: # check if list is empty
sobe = 0
return lista_cobacias
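# Illustrative sketch (the 'cobacia' code is a placeholder): starting from one code,
# collect every code upstream of it and use the list to subset the reach table.
def _example_upstream_subset(df_bho_trecho, codigo='8624412'):
    lista_cobacias = upstream_bho(codigo, df_bho_trecho)
    return df_bho_trecho[df_bho_trecho['cobacia'].isin(lista_cobacias)]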
def bho2mini(roi_df_trecho, roi_df_area, uparea_min = 30, lmin = 6):
mtrecs = roi_df_trecho[roi_df_trecho['nuareamont']>uparea_min]
mtrecs = mtrecs[['sub',
'cotrecho',
'cobacia',
'cocursodag',
'noorigem',
'nodestino',
'nucomptrec',
'nuareamont',
'nutrjus',
'nustrahler',
'geometry']]
roi_df_trecho['ismain'] = 0
water_courses = mtrecs.groupby('cocursodag').size()
# Loop over the water courses to aggregate reaches
for c in water_courses.index:
twc = mtrecs[mtrecs['cocursodag']==c].sort_values(by='cobacia')
# Aggregate until no reach shorter than lmin remains
while twc['nucomptrec'].min()<lmin:
tmin = twc[twc['nucomptrec']==twc['nucomptrec'].min()].iloc[0]
if any(twc['nutrjus']==tmin['cotrecho']): # if there are reaches immediately upstream
tmin_mon = twc[twc['nutrjus']==tmin['cotrecho']].iloc[0]
else:
# here the short, most upstream reach is lengthened if possible
if any(roi_df_trecho['nutrjus'] == tmin['cotrecho']):
tmin_mon = roi_df_trecho[
(roi_df_trecho['nutrjus'] == tmin['cotrecho']) &
(roi_df_trecho['cocursodag'] == tmin['cocursodag'])].iloc[0]
else:
tmin_mon = tmin.copy()
tmin_mon['nucomptrec'] = 9999
if any(twc['cotrecho']==tmin['nutrjus']): # if there are reaches immediately downstream
tmin_jus = twc[twc['cotrecho']==tmin['nutrjus']].iloc[0]
else:
tmin_jus = tmin.copy()
tmin_jus['nucomptrec'] = 9999
if tmin_jus.name == tmin_mon.name:
twc.loc[tmin.name, 'nucomptrec'] = 9999
continue
l2r = np.minimum(tmin_mon['nucomptrec'], tmin_jus['nucomptrec'])
if tmin_mon['nucomptrec'] == l2r:
tmon = tmin_mon
tjus = tmin
elif tmin_jus['nucomptrec'] == l2r:
tmon = tmin
tjus = tmin_jus
else:
print('Error in finding minimum length!')
t2r = tjus.copy()
# t2r['cobacia'] = tmon['cobacia']
t2r['cotrecho'] = tmon['cotrecho']
t2r['noorigem'] = tmon['noorigem']
t2r['nucomptrec'] = tjus['nucomptrec'] + tmon['nucomptrec']
t2r['geometry'] = sops.unary_union([tjus.geometry, tmon.geometry])
twc.loc[t2r.name] = t2r
mtrecs.loc[t2r.name] = t2r
roi_df_trecho.loc[ # Attribute a code in the original file to track which reach was aggregated
roi_df_trecho.index.isin([tmon.name, tjus.name]), 'ismain'] = 1
try:
twc.drop(tmon.name, inplace=True)
mtrecs.drop(tmon.name, inplace=True)
except:
continue
# cobacias_in.loc[t2r.name].extend([tmon['cobacia']])
mtrecs = mtrecs.sort_values(by='cobacia').reset_index(drop=True)
mtrecs = mtrecs.set_crs(roi_df_area.crs)
mtrecs['nucomptrec'] = mtrecs.geometry.to_crs("ESRI:102033").length / 1000
for i, tr in mtrecs.iterrows():
# This part is not optimized at all...
# It does not increase runtime considerably, but it could be improved
cobacias_upstr = upstream_bho(tr['cobacia'], roi_df_trecho)
trecs_upstr = roi_df_trecho['cobacia'].isin(cobacias_upstr)
roi_df_trecho.loc[
trecs_upstr, 'midx'] = i
# cobacias_upstr.remove(tr['cobacia'])
# trecs_upstr = roi_df_trecho['cobacia'].isin(cobacias_upstr)
# mtrecs.loc[
# mtrecs['cotrecho'].isin(roi_df_trecho.loc[trecs_upstr, 'cotrecho']),
# 'nutrjus'] = tr['cotrecho']
roi_df_area['midx'] = roi_df_trecho['midx']
mpols = roi_df_area[['sub', 'midx', 'cobacia', 'cocursodag', 'geometry']]
mpols = mpols.dissolve(by='midx', aggfunc='first')
mpols['nuareacont'] = mpols.geometry.to_crs("ESRI:102033").area / 1000000
# Loop over the aggregated reaches to compute the length of the largest tributary
for i, tr in mtrecs.iterrows():
tmini = roi_df_trecho[roi_df_trecho['midx'] == i]
tmini_in = tmini[tmini['ismain']==1] # Trecho principal
tmini_out = tmini.drop(tmini_in.index) # Afluentes
roi_df_trecho.loc[
tmini.index, 'catid'] = tr['cotrecho']
mtrecs.loc[ # Reassign downstream catchments
mtrecs['nutrjus'].isin(tmini_in['cotrecho']), 'nutrjus'] = tr['cotrecho']
# Compute the largest tributary
if any(tmini_out.index):
for j, trin in tmini_out.iterrows():
if any(tmini_out['cotrecho']==trin['nutrjus']):
tmini_out.loc[j, 'nucomptrec'] = trin['nucomptrec'] + tmini_out.loc[
tmini_out['cotrecho']==trin['nutrjus'], 'nucomptrec'].iloc[0]
tmini_out.loc[j, 'nodestino'] = tmini_out.loc[
tmini_out['cotrecho']==trin['nutrjus'], 'nodestino'].iloc[0]
mtrecs.loc[i, 'nucompafl'] = tmini_out['nucomptrec'].max()
mtrecs.loc[i,
['noorigafl', 'nodestafl']] = tmini_out.loc[
tmini_out['nucomptrec']==tmini_out['nucomptrec'].max(),
['noorigem', 'nodestino']].iloc[0].tolist()
else: # if no largest tributary is found, estimate from the mini-catchment area
mtrecs.loc[i, 'nucompafl'] = 2 * (
mpols.loc[i, 'nuareacont'] / np.pi) ** (1/2)
#mtrecs.loc[np.isnan(mtrecs['noorigemafl']), ['noorigemafl', 'nodestinoafl']] = 0
#mtrecs[['noorigemafl', 'nodestinoafl']] = mtrecs[['noorigemafl', 'nodestinoafl']].astype('Int32')
bho_trecs = roi_df_trecho[['cotrecho',
'cobacia',
'catid',
'midx',
'noorigem',
'nodestino',
'cocursodag',
'nucomptrec',
'nuareacont',
'nuareamont',
'nutrjus',
'geometry']]
return mtrecs, mpols, bho_trecs
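# Minimal end-to-end sketch of Module 1 (file names, the outlet coordinate and the
# thresholds are assumptions for illustration only): load the three BHO layers,
# delineate the region of interest from an outlet coordinate, and aggregate the
# drainage network into "mini" reaches and catchments.
def _example_module1_pipeline():
    demfn = 'dem.tif'                                  # assumed
    df_area = carrega_bho('bho_area.gpkg', demfn)      # assumed file names
    df_trecho = carrega_bho('bho_trecho.gpkg', demfn)
    df_ponto = carrega_bho('bho_ponto.gpkg', demfn)
    cods = coords_in_bho([(-51.2, -30.0)], df_area).tolist()  # outlet cobacia(s)
    roi_area, roi_trecho, roi_ponto, cobacias, pontos = roi_define(
        df_area, df_trecho, df_ponto, cods, method='coord')
    mtrecs, mpols, bho_trecs = bho2mini(roi_trecho, roi_area, uparea_min=30, lmin=6)
    return mtrecs, mpols, bho_trecs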
# =============================================================================
# Module 2
# Needs DEM map, HRU map and (in case of inertial propagation) HAND map
# =============================================================================
from rasterstats_gdal import *
def s_trec(cods_trecho, cods_ponto, corigem, cdestino, ccomp):
m = cods_trecho.merge(cods_ponto, how='left', left_on=corigem, right_on='idponto')
m = m.merge(cods_ponto, how='left', left_on=cdestino, right_on='idponto')
# m = m.sort_values(by='cobacia')
# m.set_index(cods_trecho.index, inplace=True)
diff_elev = m['elev_x']-m['elev_y']
river_l = cods_trecho[ccomp]
river_s = diff_elev / river_l
river_s = river_s.dropna(axis=0)
return river_s
def get_slopes(trecsfn, polsfn, pointsfn, demfn):
# Load vectors
mtrecs = gpd.read_file(trecsfn)
mpols = gpd.read_file(polsfn)
mpoints = gpd.read_file(pointsfn)
# Compute slopes based on real tributaries (aggregated)
elevpoint = zonal_stats(
pointsfn, demfn, stats=['min'])
elevpoint = [d['min'] for d in elevpoint]
elevpoint = pd.Series(elevpoint)
mpoints['elev'] = elevpoint
cods_ponto = mpoints[['idponto', 'elev']]
cods_trecho = mtrecs[[ 'cobacia',
'noorigem', 'nodestino', 'nucomptrec',
'noorigafl', 'nodestafl', 'nucompafl']]
# Compute slope in main stretches and in aggregated tributaries
mtrecs['nudecltrec'] = s_trec(cods_trecho, cods_ponto, 'noorigem', 'nodestino', 'nucomptrec')
mtrecs['nudeclafl'] = s_trec(cods_trecho, cods_ponto, 'noorigafl', 'nodestafl', 'nucompafl')
# Compute afl slopes based on polygon hypsometric curve (remaining stretches)
elevarea = zonal_stats(
polsfn,
demfn, stats=['percentile_10', 'percentile_85'])
mpols['p10'] = [d['percentile_10'] for d in elevarea]
mpols['p85'] = [d['percentile_85'] for d in elevarea]
mpols['diffelev'] = mpols['p85'] - mpols['p10']
mpols['nudecl'] = mpols['diffelev'] / mtrecs['nucompafl']
mtrecs.loc[np.isnan(mtrecs['nudeclafl']), 'nudeclafl'] = mpols['nudecl']
return mtrecs
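# Illustrative call (all file names are placeholders): compute main-stem and
# tributary slopes for the aggregated network produced by Module 1.
def _example_get_slopes():
    return get_slopes('mini_trecs.gpkg', 'mini_pols.gpkg', 'bho_points.gpkg', 'dem.tif')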
def get_hrus(polsfn, hrufn):
hru_rate = zonal_stats(
polsfn,
hrufn, categorical=True)
n = pd.DataFrame(hru_rate)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time :Created on Dec 04 4:39 PM 2018
# @Author :<NAME>
import os,sys
import numpy as np
import pandas as pd
import glob
import math
def compute_time_difference(time_to_seconds_list):
'''Calculate the delta time.
Input: time_to_seconds_list.
Output: new list storing the delta times.'''
save_time_difference = []
for i in range(0, len(time_to_seconds_list) - 1):
save_time_difference.append(abs(time_to_seconds_list[i + 1] - time_to_seconds_list[i]))
save_time_difference.insert(0, 0)
return save_time_difference
def compute_speed_difference(Speed_list):
'''Calculate the delta speed.
Input: Speed_list
Output: new list storing the delta speeds.'''
save_speed_difference = []
for i in range(0, len(Speed_list) - 1):
difference = math.fabs(Speed_list[i + 1] - Speed_list[i])
save_speed_difference.append(difference)
save_speed_difference.insert(0, 0.0)
save_speed_difference1 = [round(j, 2) for j in save_speed_difference]
return save_speed_difference1
def compute_heading_difference(Heading_list):
'''Calculate the delta heading.
Input: Heading_list
Output: new list storing the delta headings.'''
save_heading_difference = []
for i in range(0,len(Heading_list)-1):
difference = math.fabs(Heading_list[i+1]-Heading_list[i])
save_heading_difference.append(difference)
save_heading_difference.insert(0,0)
return save_heading_difference
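# Small worked example (values invented for illustration): each helper returns a list
# the same length as its input, with a leading 0 inserted so the deltas line up with
# the original records.
def _example_deltas():
    time_to_seconds = [0, 10, 25, 45]
    speeds = [12.0, 12.4, 11.1, 11.1]
    headings = [90, 95, 95, 100]
    dt = compute_time_difference(time_to_seconds)  # [0, 10, 15, 20]
    dv = compute_speed_difference(speeds)          # [0.0, 0.4, 1.3, 0.0]
    dh = compute_heading_difference(headings)      # [0, 5.0, 0.0, 5.0]
    return dt, dv, dh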
def save_data_into_file(MMSI_list,
Longitude_list,
Latitude_list,
Speed_list,
Heading_list,
Day_list,
time_to_seconds_list,
delta_time,
delta_speed,
delta_heading):
'''Store the data and output it to a file.'''
# dictionary for storing the list and transfer it to dataframe
save_dict = {'MMSI':MMSI_list,
'Longitude':Longitude_list,
'Latitude':Latitude_list,
'Speed':Speed_list,
'Heading':Heading_list,
'Day':Day_list,
'time_to_seconds':time_to_seconds_list,
'delta_time':delta_time,
'delta_speed':delta_speed,
'delta_heading':delta_heading}
data = pd.DataFrame(save_dict)
# output the file
name_mmsi = int(data.iloc[0]['MMSI'])
name_day = int(data.iloc[0]['Day'])
data.to_csv(r'C:\Users\LPT-ucesxc0\AIS-Data\Danish_AIS_data_process\aisdk_20180901\%d-%d.csv' % (name_mmsi, name_day),
index=False)
file_names = glob.glob(r"C:\Users\LPT-ucesxc0\AIS-Data\Danish_AIS_data_process\aisdk_20180901\test\*.csv")
threshold_heading_max_value = 20
for file in file_names:
file_load = pd.read_csv(file)
file_load['Timestamp'] = pd.to_datetime(file_load['Timestamp'], format='%d/%m/%Y %H:%M:%S')
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
df_all = pd.read_csv("/mnt/nadavrap-students/STS/data/imputed_data2.csv")
print(df_all.columns.tolist())
print (df_all.info())
df_all = df_all.replace({'MtOpD':{False:0, True:1}})
df_all = df_all.replace({'Complics':{False:0, True:1}})
mask_reop = df_all['Reoperation'] == 'Reoperation'
df_reop = df_all[mask_reop]
mask = df_all['surgyear'] == 2010
df_2010 = df_all[mask]
mask = df_all['surgyear'] == 2011
df_2011 = df_all[mask]
mask = df_all['surgyear'] == 2012
df_2012 = df_all[mask]
mask = df_all['surgyear'] == 2013
df_2013 = df_all[mask]
mask = df_all['surgyear'] == 2014
df_2014 = df_all[mask]
mask = df_all['surgyear'] == 2015
df_2015 = df_all[mask]
mask = df_all['surgyear'] == 2016
df_2016 = df_all[mask]
mask = df_all['surgyear'] == 2017
df_2017 = df_all[mask]
mask = df_all['surgyear'] == 2018
df_2018 = df_all[mask]
mask = df_all['surgyear'] == 2019
df_2019 = df_all[mask]
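# Hedged alternative (not part of the original analysis): the ten per-year DataFrames
# above could also be produced in one pass. The explicit df_2010 ... df_2019 names are
# kept because the groupby_* functions below refer to them directly.
def _split_by_year(df, years=range(2010, 2020)):
    """Return a {year: sub-DataFrame} dict for the given surgery years."""
    return {year: df[df['surgyear'] == year] for year in years}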
avg_hospid = pd.DataFrame()
def groupby_siteid():
df2010 = df_2010.groupby('HospID')['HospID'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('HospID')['HospID'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('HospID')['HospID'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('HospID')['HospID'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('HospID')['HospID'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('HospID')['HospID'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('HospID')['HospID'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('HospID')['HospID'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('HospID')['HospID'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('HospID')['HospID'].count().reset_index(name='2019_total')
df1 =pd.merge(df2010, df2011, on='HospID', how='outer')
df2 =pd.merge(df1, df2012, on='HospID', how='outer')
df3 =pd.merge(df2, df2013, on='HospID', how='outer')
df4 =pd.merge(df3, df2014, on='HospID', how='outer')
df5 =pd.merge(df4, df2015, on='HospID', how='outer')
df6 =pd.merge(df5, df2016, on='HospID', how='outer')
df7 =pd.merge(df6, df2017, on='HospID', how='outer')
df8 =pd.merge(df7, df2018, on='HospID', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='HospID', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['HospID'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['HospID','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("/tmp/pycharm_project_723/files/total op sum all years HospID.csv")
# print("details on site id dist:")
# # print("num of all sites: ", len(df_sum_all_Years))
#
# less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
# less_8.to_csv("total op less 10 years siteid.csv")
# print("num of sites with less years: ", len(less_8))
#
# x = np.array(less_8['Distinct_years'])
# print(np.unique(x))
avg_hospid['HospID'] = df_sum_all_Years['HospID']
avg_hospid['total_year_sum'] = df_sum_all_Years['Year_sum']
avg_hospid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_hospid['num_of_years'] = df_sum_all_Years['Distinct_years']
def groupby_siteid_reop():
df2010 = df_2010.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2019_reop')
df1 =pd.merge(df2010, df2011, on='HospID', how='outer')
df2 =pd.merge(df1, df2012, on='HospID', how='outer')
df3 =pd.merge(df2, df2013, on='HospID', how='outer')
df4 =pd.merge(df3, df2014, on='HospID', how='outer')
df5 =pd.merge(df4, df2015, on='HospID', how='outer')
df6 =pd.merge(df5, df2016, on='HospID', how='outer')
df7 =pd.merge(df6, df2017, on='HospID', how='outer')
df8 =pd.merge(df7, df2018, on='HospID', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='HospID', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['HospID'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['HospID', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_hospid['num_of_years']
df_sum_all_Years.to_csv("/tmp/pycharm_project_723/files/sum all years HospID reop.csv")
# -----------------------first op------------------------------------
df_10 = df_2010.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2010_FirstOperation')
df_11 = df_2011.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2011_FirstOperation')
df_12 = df_2012.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2012_FirstOperation')
df_13 = df_2013.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2013_FirstOperation')
df_14 = df_2014.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2014_FirstOperation')
df_15 = df_2015.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2015_FirstOperation')
df_16 = df_2016.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2016_FirstOperation')
df_17 = df_2017.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2017_FirstOperation')
df_18 = df_2018.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2018_FirstOperation')
df_19 = df_2019.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2019_FirstOperation')
d1 = pd.merge(df_10, df_11, on='HospID', how='outer')
d2 = pd.merge(d1, df_12, on='HospID', how='outer')
d3 = pd.merge(d2, df_13, on='HospID', how='outer')
d4 = pd.merge(d3, df_14, on='HospID', how='outer')
d5 = pd.merge(d4, df_15, on='HospID', how='outer')
d6 = pd.merge(d5, df_16, on='HospID', how='outer')
d7 = pd.merge(d6, df_17, on='HospID', how='outer')
d8 = pd.merge(d7, df_18, on='HospID', how='outer')
df_sum_all_Years_total = pd.merge(d8, df_19, on='HospID', how='outer')
df_sum_all_Years_total.fillna(0, inplace=True)
cols = df_sum_all_Years_total.columns.difference(['HospID'])
df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years_total.columns.difference(['HospID', 'Distinct_years'])
df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / avg_hospid['num_of_years']
df_sum_all_Years_total.to_csv("/tmp/pycharm_project_723/files/First Operation sum all years HospID.csv")
#---------------------------merge------------------------
temp_first = pd.DataFrame()
temp_first['HospID'] = df_sum_all_Years_total['HospID']
temp_first['Year_sum_Firstop'] = df_sum_all_Years_total['Year_sum']
temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
temp_reop = pd.DataFrame()
temp_reop['HospID'] = df_sum_all_Years['HospID']
temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
temp_reop['Year_sum_reop'] = df_sum_all_Years['Year_sum_reop']
df_mort = groupby_mortality_siteid()
df_reop_mort = groupby_mortality_siteid_reop()
df_reop_complics = groupby_complics_siteid()
df20 = pd.merge(avg_hospid, temp_first, on='HospID', how='outer')
temp_merge = pd.merge(df20, temp_reop, on='HospID', how='outer')
temp_merge2 = pd.merge(temp_merge, df_mort, on='HospID', how='outer')
temp_merge3 = pd.merge(temp_merge2,df_reop_mort, on='HospID', how='outer')
total_avg_site_id = pd.merge(temp_merge3, df_reop_complics, on='HospID', how='outer')
total_avg_site_id['firstop/total'] = (total_avg_site_id['Year_sum_Firstop'] / total_avg_site_id['total_year_sum']) *100
total_avg_site_id['reop/total'] = (total_avg_site_id['Year_sum_reop'] / total_avg_site_id['total_year_sum']) * 100
total_avg_site_id['mortalty_rate'] = (total_avg_site_id['Mortality'] / total_avg_site_id['total_year_sum'])*100
total_avg_site_id['mortalty_reop_rate'] = (total_avg_site_id['Mortality_reop'] / total_avg_site_id['Year_sum_reop']) * 100
total_avg_site_id['Complics_reop_rate'] = (total_avg_site_id['Complics_reop'] / total_avg_site_id['Year_sum_reop']) * 100
total_avg_site_id.fillna(0, inplace=True)
total_avg_site_id.to_csv('total_avg_HospID.csv')
# df_siteid_reg['SiteID'] =total_avg_site_id['SiteID']
# df_siteid_reg['total_year_avg'] = total_avg_site_id['total_year_avg']
def groupby_mortality_siteid():
dfmort = df_all.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality')
dfmort.to_csv("/tmp/pycharm_project_723/files/mortality HospID.csv")
return dfmort
def groupby_mortality_siteid_reop():
dfmort = df_reop.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_reop')
dfmort.to_csv("/tmp/pycharm_project_723/files/mortality HospID reop.csv")
return dfmort
def groupby_complics_siteid():
df_comp = df_all.groupby('HospID')['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics')
dfmort = df_reop.groupby('HospID')['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_reop')
df20 = pd.merge(df_comp, dfmort, on='HospID', how='outer')
df20.to_csv("/tmp/pycharm_project_723/files/Complics HospID.csv")
return df20
# groupby_siteid()
# groupby_siteid_reop()
df_all = pd.read_csv("/mnt/nadavrap-students/STS/data/imputed_data2.csv")
avg_HospID = pd.read_csv("/tmp/pycharm_project_723/total_avg_HospID.csv")
df_all = df_all.replace({'MtOpD':{False:0, True:1}})
df_all = df_all.replace({'Complics':{False:0, True:1}})
mask_reop = df_all['Reoperation'] == 'Reoperation'
df_reop = df_all[mask_reop]
df_op = df_all[~mask_reop]
Mortality_siteid = pd.DataFrame()
Mortality_surgid = pd.DataFrame()
Complics_siteid = pd.DataFrame()
Complics_surgid = pd.DataFrame()
def groupby_mortality_HospID():
df_count = df_all.groupby('HospID')['HospID'].count().reset_index(name='total')
df_count_op = df_op.groupby('HospID')['HospID'].count().reset_index(name='count_First')
df_mort_op = df_op.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_First')
df_PredMort_op = df_op.groupby('HospID')['PredMort'].mean().reset_index(name='PredMort_First_avg')
df_count_reop = df_reop.groupby('HospID')['HospID'].count().reset_index(name='count_Reop')
df_mort_reop = df_reop.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_Reop')
df_PredMort_reop= df_reop.groupby('HospID')['PredMort'].mean().reset_index(name='PredMort_Reoperation_avg')
df1 = pd.merge(df_count, df_count_op, on='HospID', how='outer')
df2 = pd.merge(df1, df_count_reop, on='HospID', how='outer')
df3 = pd.merge(df2, df_mort_op, on='HospID', how='outer')
df4 = pd.merge(df3, df_mort_reop, on='HospID', how='outer')
#####################################################################
##### IMPORT STANDARD MODULES
#####################################################################
from __future__ import print_function
from ..data import DataBlock
from ..preprocess import PreProcess
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from random import sample
#####################################################################
##### TESTS FOR DATABLOCK
#####################################################################
def test_datablock(datablock):
assert datablock.train.shape == (150, 5)
assert datablock.test.shape == (150, 5)
assert datablock.predict.shape == (150, 5)
#####################################################################
##### TESTS FOR PREPROCESS
#####################################################################
def test_check_missing_no_missing(datablock):
pp = PreProcess(datablock)
result = pp.check_missing(printResult=False,returnResult=True)
for df,miss in result.items():
print(df,miss)
assert miss.sum()==0
def test_check_missing_missing_induced(datablock):
df = pd.DataFrame(datablock.train, copy=True)
import pandas as pd
import xarray as xr
import re
import numpy as np
import datetime as dt
class AWS:
'''This class represents an Automatic Weather Station and its time series'''
def __init__(self, name, code, lat, lon, elev):
self.name = name
self.code = code
self.lat = lat
self.lon = lon
self.elev = elev
self.atmvar = dict()
def add_atmvar(self, name, time_series_dataarray):
self.atmvar[name] = time_series_dataarray
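# Illustrative sketch of the AWS container (station metadata and values are made up):
# wrap a short temperature series in an xarray.DataArray and attach it under a
# variable name.
def _example_aws_container():
    aws = AWS('Example', 'EX1', -75.0, -0.4, 30)
    time = pd.date_range('2021-12-01', periods=3, freq='10min')
    da = xr.DataArray([-5.2, -5.0, -4.8], coords=[time], dims=['time'])
    aws.add_atmvar('T2m', da)
    return aws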
class AWSWriter:
'''This class is responsible for saving a group of AWS as a .csv file'''
pass
class AWSWiscReader:
'''This class reads an AWS from a .txt file from wisc data'''
def read_aws(self, filepath):
aws = self.read_metadata(filepath)
da = self.read_time_series(filepath)
aws.add_atmvar('T2m', da)
return aws
def read_metadata(self, filepath):
with open(filepath) as f:
firstline = f.readline().rstrip()
first_match_obj = re.match( r'Year: (.*) Month: (.*) ID: (.*) ARGOS: (.*) Name: (.*)', firstline)
secondline = f.readline().rstrip()
second_match_obj = re.match( r'Lat: (.*) Lon: (.*) Elev: (.*)', secondline)
return AWS( first_match_obj.group(5).strip(),
first_match_obj.group(3).strip(),
second_match_obj.group(1).strip(),
second_match_obj.group(2).strip(),
second_match_obj.group(3).strip(),
)
def read_time_series(self, filepath):
df = pd.read_csv(filepath, skiprows=2, header=None, sep='\s+', na_values=444.0)
temp = df[5]
time = pd.date_range("2021-12-01", "2021-12-31 23:50:00", freq="10min")
da = xr.DataArray(temp, coords=[time], dims=['time'])
return da
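# Illustrative usage (the file name is a placeholder for a Wisconsin AWS .txt export):
# the reader returns an AWS object whose 'T2m' entry is a 10-minute temperature
# DataArray covering December 2021.
def _example_read_wisc(filepath='wisc_aws_202112.txt'):
    reader = AWSWiscReader()
    aws = reader.read_aws(filepath)
    return aws.atmvar['T2m']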
class AWSHalleyReader:
'''This class reads an AWS from a .txt file from halley or rothera station'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
varnames = ['Temp_ext_Avg', 'Temp_hmp_Avg', 'Temp_box_Avg']
for varname in varnames:
da_min = self.read_time_series(filepath, varname)
da = self.resample_time_series(da_min)
aws.add_atmvar(varname, da)
return aws
def read_time_series(self, filepath, varname):
df = pd.read_csv(filepath, sep=',')
time = pd.to_datetime(df['TIMESTAMP'])
data = df[varname]
da_min = xr.DataArray(data, coords=[time], dims='time')
return da_min
def resample_time_series(self, da_min):
da = da_min.resample(time='10min', closed='right', label='right', skipna=True).mean()[1:-1]
return da
class AWSArgusReader:
'''This class reads an AWS from a .txt file from ARGUS Australian station'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
varnames = ['AIR_TEMPERATURE_1M', 'AIR_TEMPERATURE_2M', 'AIR_TEMPERATURE_4M']
for varname in varnames:
da_min = self.read_time_series(filepath, varname)
da = self.resample_time_series(da_min)
aws.add_atmvar(varname, da)
return aws
def read_time_series(self, filepath, varname):
df = pd.read_csv(filepath, sep=',')
time = pd.to_datetime(df['OBSERVATION_DATE'])
data = df[varname].replace(0, np.nan)
da_min = xr.DataArray(data, coords=[time], dims=['time']).sortby('time')
return da_min
def resample_time_series(self, da_min):
da = da_min.resample(time='10min', closed='right', label='right', skipna=True).mean()[1:-1]
return da
class AWSNOAAReader:
'''This class reads an AWS from a .txt file from NOAA data'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
varnames = ['TEMPERATURE at 2 Meters', 'TEMPERATURE at 10 Meters', 'TEMPERATURE at Tower Top']
varcolumns = [10, 11, 12]
for (n, c) in zip(varnames, varcolumns):
da_min = self.read_time_series(filepath, c)
da = self.resample_time_series(da_min)
aws.add_atmvar(n, da)
return aws
def read_time_series(self, filepath, varcolumn):
df = pd.read_csv(filepath, header=None, sep='\s+', parse_dates={'datetime':[1,2,3,4,5]})
df['datetime'] = pd.to_datetime(df['datetime'], format='%Y %m %d %H %M')
time = df['datetime']
data = df[varcolumn]
da_min = xr.DataArray(data, coords=[time], dims=['time']).sortby('time')
return da_min
def resample_time_series(self, da_min):
da = da_min.resample(time='10min', closed='right', label='right', skipna=True).mean()[1:-1]
return da
class AWSNZReader:
'''This class reads an AWS from a .txt file from NZ data'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
varnames = ['Air Temperature in degrees Celsius']
for name in varnames:
da_min = self.read_time_series(filepath, name)
da = self.resample_time_series(da_min)
aws.add_atmvar(name, da)
return aws
def read_time_series(self, filepath, name):
parsedict = {'datetime': [' Year Month Day Hour Minutes in YYYY.2',
'MM.2',
'DD.2',
'HH24.2',
'MI format in Universal coordinated time']}
df = pd.read_csv(filepath, sep=',', low_memory=False, parse_dates=parsedict)
df['datetime'] = pd.to_datetime(df['datetime'], format='%Y %m %d %H %M')
time = df['datetime']
data_str = df[name]
data = data_str.apply(lambda x: float(x.strip()) if x.strip() != '' else np.nan)
da_min = xr.DataArray(data, coords=[time], dims=['time']).sortby('time')
return da_min
def resample_time_series(self, da_min):
da = da_min.resample(time='10min', closed='right', label='right', skipna=True).mean()[1:-1]
return da
class AWSGUReader:
'''This class reads an AWS from the data_5sec_con_nuevo_sensor.txt file from Glaciar Union data'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
da_5sec = self.read_time_series(filepath)
da = self.resample_time_series(da_5sec)
aws.add_atmvar('T2m', da)
return aws
def read_time_series(self, filepath):
df = pd.read_csv(filepath, sep=',', header = None, skiprows=21207)
time = pd.date_range('2021-12-02 23:50:00','2021-12-04 12:19:40', freq='5s')
time = time + dt.timedelta(seconds=60*60*3)
data = df[6]
da_5sec = xr.DataArray(data.values, coords=[time.values], dims=['time'])
return da_5sec
def resample_time_series(self, da_5sec):
da = da_5sec.resample(time='10min', closed='right', label='right', skipna=True).mean()[1:-1]
return da
class AWSEFMReader:
'''This class reads an AWS file from <NAME> station data'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
da_min = self.read_time_series(filepath)
da = self.resample_time_series(da_min)
aws.add_atmvar('T2m', da)
return aws
def read_time_series(self, filepath):
df = pd.read_csv(filepath, sep = ',', header=None)
year = df[0].astype(str)
month = df[1].astype(str)
day = df[2].apply(lambda x: '0'+str(x))
hhmm = df[3]
time_str = year + '-' + month + '-' + day + ' ' + hhmm + ':00'
time = pd.to_datetime(time_str, format='%Y-%m-%d %H:%M:%S')
# ********************************************************************************** #
# #
# Project: FastClassAI workbench #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import cv2
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib as mpl # to get some basif functions, heping with plot mnaking
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from PIL import Image, ImageDraw
import matplotlib.gridspec
from scipy.spatial import distance
from scipy.cluster import hierarchy
from matplotlib.font_manager import FontProperties
from scipy.cluster.hierarchy import leaves_list, ClusterNode, leaders
import graphviz # allows visualizing decision trees,
from sklearn.tree import export_graphviz
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ParameterGrid
from sklearn.decomposition import PCA
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier # accepts only numerical data
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
# Fuction, ........................................................................
# new 2020.12.13
def sklearn_grid_search(*,
# ... models and manes,
method,
grid,
run_name="",
dataset_name="",
dataset_variant="",
module_name="",
# .... input data names and paths
file_namepath_table,
file_namepath_table_dict=None,
PATH_batch_data=None, # if None, it is provided by the file
PATH_batch_labels=None, # if None, it is provided by the file
PATH_results=None, # If None, the same as the last PATH_features will be used,
# .... names used to search for subset names and save results
class_encoding,
class_decoding,
dropout_value='to_dropout',
train_subset_name="train", # because I donth have to call that train in my files,
valid_subset_name="valid", # if None, train_proportion will be used
test_subset_name_list="test", # if None, the loist is simply shorter,
train_proportion=0.7, # used only if subset_names_valid = None, by none,
unit_test=True,
# ... results and info,
store_predictions=True,
track_progres=True,
verbose=False,
):
# ......................................................................
# Set up,
# ......................................................................
# - contats
colname_with_classname_in_batch_labels_table = "classname"
# - variables,
model_ID = -1 # to start unique counts of the models from 0
batchfile_table = file_namepath_table # legacy issue
available_subsets_in_batchfile_table = pd.Series(batchfile_table.subset_name.tolist())
# - to store the results,
dot_data_dict = dict() # decision trees stored in dot format,
model_acc_and_parameters_list = list()
model_predictions_dict = dict()
model_parameters_dict = dict() # for iterations from grid
# - dict to use values from file_namepath_table to load the data
if file_namepath_table_dict==None:
file_namepath_table_dict = {
"subset_name":'subset_name',
"batch_data_file_name":'extracted_features_file_name',
"batch_data_file_path":'extracted_features_file_path',
"batch_labels_file_name":'labels_file_name',
"batch_labels_file_path":'labels_file_path'
}
else:
pass
# - check if samples shdoul be dropped
samples_to_drop_detected=(np.array(list(class_encoding.keys()))==dropout_value).sum()>0
# ......................................................................
# SELECT DATASET NAMES
# ......................................................................
'''
Create xy_names with the subset names used as train, valid and test.
xy_names will be used to find the names of data files to load
in the batchfile_table
'''
# 1. train set (train) - 1st POSITION IN xy_names -
xy_names = []
xy_names.append(train_subset_name) # 1st is always the train dataset,
# check if you are asking for existing subset name
if sum(available_subsets_in_batchfile_table==train_subset_name)>=1:
requested_subsets_are_avaialble_in_batchfile_table = True
else:
requested_subsets_are_avaialble_in_batchfile_table = False
# ......................................................................
if unit_test == True:
xy_names.append(train_subset_name) # later on updated to valid,
xy_names.append(train_subset_name) # later on updated to test
some_values_required_to_build_valid_dataset_are_missing=False # because it is not used at all
else:
# 2. validation set (valid), - 2nd POSITION IN xy_names -
if valid_subset_name!=None:
'valid subset specified externally'
xy_names.append(valid_subset_name) # second is always the valid dataset,
train_proportion=None # done here, to ensure it will be ignored later on,
# check if you are asking for existing subset name
if sum(available_subsets_in_batchfile_table==valid_subset_name)==0:
requested_subsets_are_avaialble_in_batchfile_table = False
else:
pass
else:
'valid subset will be created from the train set with a specific proportion'
xy_names.append("valid") # second is always the valid dataset,
# Ensure we have that value as flot betwe 0 and 1
try:
train_proportion = float(train_proportion)
except:
if verbose==True:
print(f"ERROR: train_proportion or valid_subset_name are missing !")
else:
pass
# test whether the validation dataset can even be found, and stop the system from loading data and grid searching
if train_proportion==None and valid_subset_name==None:
some_values_required_to_build_valid_dataset_are_missing = True
else:
some_values_required_to_build_valid_dataset_are_missing = False
# 3. test sets (test)
'there can be more than one test set, but they must have different names, e.g. test1, test2, ... '
if test_subset_name_list!=None:
# adapt to loo below, it the string was given with just one name,
if isinstance(test_subset_name_list, str):
test_subset_name_list = [test_subset_name_list]
# place each test names, but only if it is in batchfile_table,
for test_subset_name in test_subset_name_list:
if sum(available_subsets_in_batchfile_table==test_subset_name)==0:
'to check if you are asking for existing subset name or there is some problem'
requested_subsets_are_avaialble_in_batchfile_table = False
else:
xy_names.append(test_subset_name)
else:
pass # xy_names will be simply shorter, and no
# check if all values in xy_names are unique with exception of unit test
'otherwise loaded data will be overwritten in the dict, and you will use them without knowing that these are the same datasets'
if unit_test==True:
all_subsetnames_in_xy_names_are_unique = True # it's not true, but I will modify it after loading the data in that single case
else:
all_subsetnames_in_xy_names_are_unique = len(pd.Series(xy_names).unique().tolist())==len(xy_names)
# ......................................................................
# STOP POINT AFTER SELECTING AND COMBINING DATASET NAMES
# ......................................................................
'''
this part of the code stops any further actions, because at least one piece
of information provided for the dataset to load was not correct, and it could corrupt
the results
'''
if requested_subsets_are_avaialble_in_batchfile_table==False:
if verbose==True or track_progres==True:
print("KeyError: at least one subset name is different then subset names used in batchfile_table or was not there")
print("the operations were stopped")
pass
elif some_values_required_to_build_valid_dataset_are_missing==True:
if verbose==True or track_progres==True:
print("KeyError: train_proportion or valid_subset_name are missing")
print("the operations were stopped")
pass
elif all_subsetnames_in_xy_names_are_unique==False:
if verbose==True or track_progres==True:
print("KeyError: at least one subset name is not unique - please make sure they are unique")
print("the operations were stopped")
pass
else:
# ......................................................................
# GRID SEARCH
# ......................................................................
'''
here the data will be loaded, the model constructed, and predictions made
'''
if track_progres==True or verbose==True:
print(f"\nGrid search for - {method} - with {len(grid)} params combinations: {pd.to_datetime('now')}")
print(f" method: {method}")
print(f"run_name: {run_name}")
print(f"dataset_name: {dataset_name}")
print(f"dataset_variant: {dataset_variant}")
print(f"module_name: {module_name}")
print(f"Number of combinations: {len(grid)}")
print(f"Unit test run: {unit_test}")
print("")
else:
pass
# ...
for params_i, params in enumerate(grid):
# ......................................................................
# GET PARAMS FOR ONE PARAM COMBINATION
# ......................................................................
# UPDATE MODEL ID,
model_ID +=1
if track_progres==True:
print('.', end="")
else:
pass
# SET PARAMETERS
pca_axes_nr = params["pca"]
model_params_dct = dict(zip(params["pc"],[params[x] for x in params["pc"]]))
random_state_nr = params["random_state_nr"]
# store random nr to check if you need to re-load the data,
if model_ID==0:
random_state_nr_inmemory = random_state_nr
else:
pass
# ......................................................................
# LOAD DATA
# ......................................................................
'''
Conditionally - only if something has changed or it is the 1st run,
'''
if model_ID>0 and random_state_nr_inmemory==random_state_nr and samples_to_drop_detected==False:
affix_to_info_on_loaded_datasubset = " - no loading, using copy of data from last run, conditions were unchanged"
pass
elif model_ID==0 or random_state_nr_inmemory!=random_state_nr or samples_to_drop_detected==True:
# update random nr,
random_state_nr_inmemory = random_state_nr
affix_to_info_on_loaded_datasubset = "" # just a message to knwo if the data were loaded again,
####### LOAD TRAIN, VALID AND TEST SUBSETS #########################################################
'''
in case, validation subset is created from train subset,
it will be omitted and loaded in the next step (3)
'''
# Step 1. Create dictionaries to store data
xy_data_dct = dict()
xy_labels_dct = dict()
xy_idx_dct = dict()
# Step 2. Add data bact/labels to sx_dict
for xy_i, xy_name in enumerate(xy_names):
# get df-subset with filenames and paths
r_filter = batchfile_table.loc[:, file_namepath_table_dict["subset_name"]]==xy_name
batchfiles_to_load = pd.DataFrame(batchfile_table.loc[r_filter, :])
# ommit valid datasets it not provided separately
'''
they may not be available and must be created from train set
'''
# .... wait if there is no subset designated for valid,
if xy_i==1 and valid_subset_name==None:
pass
# .... proceed using separate source data (train and valid were already separated)
else:
# - a - load individual batches that will create one subset
'''
load and join data and batch label tables
from all batches for a given dataset
'''
for row_i, row_nr in enumerate(list(range(batchfiles_to_load.shape[0]))):
# - a.1 - find filenames and paths in the table
one_data_batch_filename = batchfiles_to_load.loc[:, file_namepath_table_dict["batch_data_file_name"]].iloc[row_nr]
one_data_batch_path = batchfiles_to_load.loc[:, file_namepath_table_dict["batch_data_file_path"]].iloc[row_nr]
# ...
one_batch_label_filename = batchfiles_to_load.loc[:, file_namepath_table_dict["batch_labels_file_name"]].iloc[row_nr]
one_batch_label_path = batchfiles_to_load.loc[:, file_namepath_table_dict["batch_labels_file_path"]].iloc[row_nr]
# - a.2 - load, and concatenate
'''
check if paths were not enforced in function parameters,
'''
if row_i==0:
if PATH_batch_data==None:
os.chdir(one_data_batch_path)
else:
os.chdir(PATH_batch_data)
encoded_img_batch = np.load(one_data_batch_filename)
# ......
if PATH_batch_labels==None:
os.chdir(one_batch_label_path)
else:
os.chdir(PATH_batch_labels)
batch_labels = pd.read_csv(one_batch_label_filename)
batch_labels.reset_index(drop=True, inplace=True) # to be sure :)
else:
if PATH_batch_data==None:
os.chdir(one_data_batch_path)
else:
os.chdir(PATH_batch_data)
encoded_img_batch = np.r_[encoded_img_batch, np.load(one_data_batch_filename)]
# ......
if PATH_batch_labels==None:
os.chdir(one_batch_label_path)
else:
os.chdir(PATH_batch_labels)
batch_labels = pd.concat([batch_labels, pd.read_csv(one_batch_label_filename)], axis=0)
batch_labels.reset_index(drop=True, inplace=True)
# - b - Add loaded data to dict
if unit_test==False:
xy_data_dct[xy_name] = encoded_img_batch
xy_labels_dct[xy_name] = batch_labels
xy_idx_dct[xy_name] = np.arange(batch_labels.shape[0], dtype="int")
else:
'''
assign names to the dict when using unit_test=True;
this is because, for a unit test, xy_names are [train, train, train],
i.e. only one set would be loaded and saved, and no transformations would be possible later on
'''
unit_set_xy_names = ["train", "valid", "test"]
xy_data_dct[unit_set_xy_names[xy_i]] = encoded_img_batch
xy_labels_dct[unit_set_xy_names[xy_i]] = batch_labels
xy_idx_dct[unit_set_xy_names[xy_i]] = np.arange(batch_labels.shape[0], dtype="int")
####### CREATE VALID DATASETS FROM TRAIN SET #########################################################
# Step 3. create valid dataset from train data if necessary,
if unit_test==False:
if valid_subset_name==None and train_proportion!=None:
# Split data into train/test sets
xy_data_dct[train_subset_name], xy_data_dct["valid"], xy_labels_dct[train_subset_name], xy_labels_dct["valid"] = train_test_split(
xy_data_dct[train_subset_name], xy_labels_dct[train_subset_name],
train_size=train_proportion,
test_size=(1-train_proportion),
random_state=random_state_nr
)
# get xy_idx to identify raw images in train/valid datasets,
_, _, xy_idx_dct[train_subset_name], xy_idx_dct["valid"] = train_test_split(
xy_idx_dct[train_subset_name], np.arange(xy_idx_dct[train_subset_name ].shape[0], dtype="int"),
train_size=train_proportion,
test_size=(1-train_proportion),
random_state=random_state_nr # Caution, random_state_nr must be the same as in the above,
)
else:
pass
else:
pass
####### Correct subset names for unit test #########################################################
# Step 4. Update xy_names
if unit_test == False:
xy_names_loaded = xy_names.copy()
else:
xy_names_loaded = ["train", "valid", "test"] # otherwise it is only train, train, train
# ......................................................................
# remove classes that should be dropped out (it's not the mask - works only for entire classes)
# ......................................................................
if samples_to_drop_detected==True:
info_on_nr_of_dropped_items = dict()
for ii, xy_name in enumerate(xy_names_loaded):
# find indexes of samples that can be used
idx_without_dropped_samples = np.where(xy_labels_dct[xy_name].loc[:,colname_with_classname_in_batch_labels_table]!=dropout_value)[0]
# update each array/df
'''
data and idx == array, labels == pd.df
'''
dataarr_before_the_drop = xy_data_dct[xy_name].shape[0]
# ...
xy_data_dct[xy_name] = xy_data_dct[xy_name][idx_without_dropped_samples]
xy_idx_dct[xy_name] = xy_idx_dct[xy_name][idx_without_dropped_samples]
xy_labels_dct[xy_name] = pd.DataFrame(xy_labels_dct[xy_name].iloc[idx_without_dropped_samples,:])
# ...
info_on_nr_of_dropped_items[xy_name]=f"dropped {dataarr_before_the_drop-xy_data_dct[xy_name].shape[0]} items"
else:
pass
# ......................................................................
# INFO AFTER LOADING THE DATA
# ......................................................................
# info
if verbose==True:
print(f"\n /-/ {params_i} /-/ params combination:")
print(f"- {params}")
else:
pass
# part of info that I wish to see even when just tracking the progress
if verbose==True or (track_progres==True and model_ID==0):
print(f"- DATA SUBSETS LOADED: {affix_to_info_on_loaded_datasubset}")
for ii, xy_name in enumerate(xy_names_loaded):
if samples_to_drop_detected==True:
message_on_dropped_samples = f' -//- {info_on_nr_of_dropped_items[xy_name]}'
else:
message_on_dropped_samples = ""
# .....
if unit_test==True:
if ii==0:
print(f" . {xy_name}: {xy_data_dct[xy_name].shape}{message_on_dropped_samples}")
else:
print(f" . {xy_name}: {xy_data_dct[xy_name].shape} - unit test - copy of train set{message_on_dropped_samples}")
else:
if ii==1 and train_proportion!=None:
print(f" . {xy_name}: {xy_data_dct[xy_name].shape} - CREATED FROM TRAIN SET ({np.round(1-train_proportion, 3)} of train set){message_on_dropped_samples}")
else:
print(f" . {xy_name}: {xy_data_dct[xy_name].shape}{message_on_dropped_samples}")
else:
pass
# ......................................................................
# DATA PREPROCESSING
# ......................................................................
# copy to make transformations without reloading data
xy_data_dct_final = xy_data_dct.copy()
# correction
'set train dataset name'
if unit_test==True:
train_subset_name_used_in_xy_names = "train"
else:
train_subset_name_used_in_xy_names = train_subset_name
##### STEP 1. PCA,....................................................
if pca_axes_nr!=0:
# Train PCA model and use it to transform data
pca = PCA(n_components=pca_axes_nr) # it will use max nr of components == nr of features in dataset !
pca.fit(xy_data_dct_final[train_subset_name], y=None) # Unsupervised learning, no y variable
# ...
for xy_name in xy_names_loaded:
xy_data_dct_final[xy_name] = pca.transform(xy_data_dct_final[xy_name])
else:
pass
##### STEP 2. encode batch_labels,...................................................
xy_labels_dct_encoded = dict()
for xy_name in xy_names_loaded:
xy_labels_dct_encoded[xy_name] = xy_labels_dct[xy_name].classname.map(class_encoding)
# ......................................................................
# BASELINE
# ......................................................................
# Create most-frequent baseline,
dummy = DummyClassifier(strategy='most_frequent')
dummy.fit(xy_data_dct_final[train_subset_name_used_in_xy_names].astype(float), xy_labels_dct_encoded[train_subset_name_used_in_xy_names].astype(int))
# ..
baseline_acc = dict()
for xy_name in xy_names_loaded:
baseline_acc[f"baseline_acc_{xy_name}"] = dummy.score(xy_data_dct_final[xy_name], xy_labels_dct_encoded[xy_name])
if verbose==True:
print("- RESULTS:")
print(" - ", model_ID, baseline_acc)
else:
pass
# ......................................................................
# SKLEARN MODELS
# ......................................................................
'these are selected models; in the future I would like to make this section much more advanced'
###### STEP 1. select the model ..............................................
if method=="knn":
model = KNeighborsClassifier(algorithm='brute', n_jobs=-1, **model_params_dct)
elif method=="svm":
model = SVC(random_state=random_state_nr, probability=True, **model_params_dct)
# enable probability estimates prior to calling fit - slows down the alg.
elif method=="logreg":
model = LogisticRegression(multi_class='ovr', solver='liblinear', **model_params_dct)
# last time worked better with scaling, but this time scaling was not implemented in data pre-processing
elif method=="random_forest":
model = RandomForestClassifier(random_state=random_state_nr, **model_params_dct)
##### STEP 2. fit classifier ..............................................
model.fit(xy_data_dct_final[train_subset_name_used_in_xy_names].astype(float),
xy_labels_dct_encoded[train_subset_name_used_in_xy_names].astype(int))
##### STEP 3. get accuracy results ..............................................
model_acc = dict()
for xy_name in xy_names_loaded:
model_acc[f"model_acc_{xy_name}"] = model.score(xy_data_dct_final[xy_name], xy_labels_dct_encoded[xy_name])
if verbose==True:
print(" - ", model_ID, model_acc)
else:
pass
#### STEP 4.
knn_kneighbors = dict()
if method=="knn":
for xy_name in xy_names_loaded:
knn_distances = model.kneighbors(xy_data_dct_final[xy_name], 10)
knn_kneighbors[xy_name] = knn_distances
else:
for xy_name in xy_names_loaded:
knn_kneighbors[xy_name] = None
# ......................................................................
# COLLECT THE RESULTS
# ......................................................................
'''
acc_restuls_and_params were added to all objects in case
I woudl have some dounbts about results origine
'''
# collect acc_restuls_and_params
acc_restuls_and_params = {
"model_ID": model_ID,
"run_name": run_name,
"method": method,
"dataset_name": dataset_name,
"dataset_variant": dataset_variant,
"module": module_name,
"unit_test":unit_test,
# ....
**baseline_acc,
**model_acc,
**params,
"pca_components_used":pca_axes_nr # legacy,
}
model_acc_and_parameters_list.append(acc_restuls_and_params) # in list, so it can be used as pd.df immediately,
# store params,
"to allow future runs"
model_parameters_dict[model_ID] = {
"acc_restuls_and_params": acc_restuls_and_params, # when added, I can easily control what I am selecting in other
"para ms":params
}
# Collect Model predictions,
if store_predictions==True:
one_model_predictions = dict()
for xy_name in xy_names_loaded:
# make predictions and decode them,
predictions = model.predict(xy_data_dct_final[xy_name])
decoded_predictions = pd.Series(predictions).map(class_decoding).values
model_predictions_proba = model.predict_proba(xy_data_dct_final[xy_name])
decoded_y_labels = pd.Series(xy_labels_dct_encoded[xy_name])
import os
from src.processing import Processing
import pandas as pd
import re
from sklearn.decomposition import PCA
# counts triplets and number of nucleic acids in k-mer
# df: given dataframe
# all_triplets: all 64 triplet combinations of A,C,G,T
def fillDataFrame(df, all_triplets):
alphabet = ['A', 'C', 'G', 'T']
top_list_df = df.copy()
del top_list_df['File']
# add columns
for b in alphabet:
top_list_df[b] = 0
for tpl in all_triplets:
top_list_df[tpl] = 0
# counts nucleotides in k-mer
for b in alphabet:
top_list_df[b] = [kmer.upper().count(b) for kmer in top_list_df.index.tolist()]
# counts triplets in k-mer
for trpl in all_triplets:
top_list_df[trpl] = [sum(1 for _ in re.finditer('(?={})'.format(trpl), kmer.upper())) for kmer in
top_list_df.index.tolist()]
return top_list_df
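# Small illustrative example (k-mers invented): fillDataFrame expects a DataFrame
# indexed by k-mer strings with a 'File' column, plus the list of all 64 triplets,
# and returns per-k-mer nucleotide and triplet counts.
def _example_fill_dataframe():
    from itertools import product
    all_triplets = [''.join(t) for t in product('ACGT', repeat=3)]
    df = pd.DataFrame({'File': ['f1.fa', 'f2.fa']}, index=['ACGTT', 'GGGCA'])
    return fillDataFrame(df, all_triplets)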
# inherits from process
class KMerPCAData(Processing):
def __init__(self, data, selected, k, peak, top, feature, cmd, secStruct_data, no_sec_peak):
super().__init__(data, selected, k, peak, top, feature, cmd, secStruct_data, no_sec_peak)
# processes data to display pca as scatterplot
def processData(self):
pca_dimension = 2
top_kmer = self.getTopKmer()
all_triplets = self.getAllTriplets()
file_name1 = os.path.basename(self.getProfileObj1().getName()) # get filenames
file_name2 = os.path.basename(self.getProfileObj2().getName())
top_list_file1 = top_kmer.query('File==@file_name1') # get top k-mer
top_list_file2 = top_kmer.query('File==@file_name2') # get top k-mer
pca = PCA(n_components=pca_dimension)
pca_df1 = None
pca_df2 = None
top_list_df1 = None
top_list_df2 = None
# create dataframe
if len(top_list_file1) > 1:
try:
top_list_df1 = fillDataFrame(top_list_file1, all_triplets) # fill remaining data
pca_data1 = pca.fit_transform(top_list_df1)
pca_df1 = pd.DataFrame(data=pca_data1, columns=['PC1', 'PC2'], index=top_list_df1.index)
except ValueError:
pca_df1 = None
if len(top_list_file2) > 1:
try:
top_list_df2 = fillDataFrame(top_list_file2, all_triplets)
pca_data2 = pca.fit_transform(top_list_df2)
pca_df2 = | pd.DataFrame(data=pca_data2, columns=['PC1', 'PC2'], index=top_list_df2.index) | pandas.DataFrame |
# %%
#######################################
def firefox_urls_visited(places_sqlite: str):
"""For a given 'places.sqlite' database path for the Firefox browser, returns a pandas.DataFrame containing the human-readable 'last_visit_date', 'visi_count', and 'url'.
Examples:
>>> results = firefox_urls_visited('places.sqlite')\n
>>> results.head()\n
last_visit_date visit_count url\n
0 2021-05-15 09:01:10.772497 1 http://www.sans.org/\n
1 2021-05-15 09:01:10.871628 1 https://www.sans.org/\n
2 2021-05-15 09:01:14.508394 1 https://www.sans.org/account/login\n
3 2021-05-15 09:01:33.163270 2 https://www.sans.org/account/loginsso\n
4 2021-05-15 09:01:15.093349 1 https://idp.sans.org/simplesaml/saml2/idp/SSOS...\n
References:
# Good reference for what .sqlite files contain various artifacts\n
https://www.foxtonforensics.com/browser-history-examiner/firefox-history-location\n
# Helpful in showings ways to change column position\n
https://sparkbyexamples.com/pandas/pandas-change-position-of-a-column/\n
# Some good info and testing, I got the idea of using list comps from here\n
https://towardsdatascience.com/apply-function-to-pandas-dataframe-rows-76df74165ee\n
# Good info on assignment and the SettingWithCopyWarning\n
https://realpython.com/pandas-settingwithcopywarning/\n
Args:
places_sqlite (str): Reference the relative path of the 'places.sqlite' file.
Returns:
pandas.DataFrame: Returns a DataFrame.
"""
import pandas
import datetime
import sqlite3
db_conn = sqlite3.connect(places_sqlite)
table_name = 'moz_places'
cursor = db_conn.execute(f"select * from '{table_name}';")
column_headers = list(map(lambda x:x[0], cursor.description))
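# cursor.description holds the column metadata; the first element of each tuple
# is the column name.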
row_contents = [e for e in cursor]
data_frame = pandas.DataFrame(row_contents, columns=column_headers)
listcomp_last_visit_date = [ datetime.datetime.fromtimestamp(x / 1000000) for x in data_frame['last_visit_date'] ]
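# Firefox stores last_visit_date as microseconds since the Unix epoch, hence the
# division by 1,000,000 before converting to datetime.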
temp_series = | pandas.Series(listcomp_last_visit_date) | pandas.Series |
import pandas as pd
import numpy as np
import tensorflow as tf
from datetime import datetime
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
from keras.models import load_model
import os
import time
token = os.environ.get("INFLUXDB_TOKEN")
org = os.environ.get("INFLUXDB_ORGANIZATION")
bucket = os.environ.get("INFLUXDB_BUCKET")
client = InfluxDBClient(url="http://localhost:8086", token=token)
measurement = 'a2_rssi'
field = 'signalStrength'
model_rssi=load_model('rssi_forcast.h5')
model_events=load_model('model_3_multi.h5')
def getDataPoints(count):
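# Flux query: pull the raw signalStrength points from the last `count` seconds of
# the configured bucket/measurement and return them as parallel lists.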
query = f'from(bucket: "{bucket}") |> range(start: -{count}s) |> filter(fn: (r) => r._measurement == "{measurement}" and r._field == "{field}")'
data = { 'timestamp': [], field: [] }
for record in client.query_api().query(query, org=org)[0].records:
data['timestamp'].append(record.values.get('_time'))
data[field].append(record.values.get('_value'))
return data
def updateDataPoints(df):
write_api = client.write_api(write_options=SYNCHRONOUS)
sequence = []
for index, row in df.iterrows():
forecast_date = row['forecasted_dates']
event = row['disruption_event']
rssi = row['forcasted_rssi']
ticks = row['timestamp'].value
sequence.append(f'disruption_event={event} forecasted_rssi={rssi} predicted_dates={forecast_date} {ticks}')
write_api.write(bucket, org, sequence)
def annotate_event(out_predict):
if out_predict==0:
return 'No disruption'
elif out_predict==1:
return 'Hardwarefehler Bediengeraet'
elif out_predict==2:
return 'Hardwarefehler Verteiler'
elif out_predict==3:
return 'Position unverifiziert (Eichung falsch)'
elif out_predict==4:
return 'Keine Linienleitertelegramme empfangen'
elif out_predict==5:
return 'Zwangsbremse wurde aktiviert'
elif out_predict==6:
return 'Position unbekannt (ZSI0)'
elif out_predict==7:
return 'Stoerung: Zwangsbremse wurde aktiviert'
def predict(num_prediction, df, model_rssi,model_events):
look_back=15
prediction_list = df[-look_back:]
disrupution_events=[]
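# Autoregressive rollout: each forecasted RSSI value is appended to the window and
# fed back as input for the next step, while the event classifier looks at the
# 10 most recent values of the same window.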
for _ in range(num_prediction):
x = prediction_list[-look_back:]
x = x.reshape((1, look_back, 1))
x_2=prediction_list[-10:]
x_2 = x_2.reshape((1, 10, 1))
out = model_rssi.predict(x)[0][0]
prediction_list = np.append(prediction_list, out)
out = np.argmax(model_events.predict(x_2),axis=1)
disrupution_events.append(annotate_event(out))
prediction_list = prediction_list[look_back-1:]
return prediction_list,disrupution_events
def predict_dates(num_prediction,df):
last_date = df['DateTime'].values[-1]
prediction_dates = | pd.date_range(last_date, periods=num_prediction+1) | pandas.date_range |
import pandas as pd
import urllib.request
import traceback
from backend.common import *
DATA_PATH = f'{get_root()}/data/world.xlsx'
def consolidate_country_col(df, country_col, country_id_col, covid_df):
"""
This method adjusts the values in the country field of the passed df
so that they match those in covid_df whenever possible, allowing the two
frames to be joined on the country field afterwards.
"""
covid_countries = covid_df[['country_id', 'country']].drop_duplicates()
covid_countries['country_lower'] = covid_countries['country'].str.lower()
covid_countries['country_id_lower'] = covid_countries['country_id'].str.lower()
df = df.rename(columns={
country_col: 'country_other',
country_id_col: 'country_id_other',
})
df['country_other_lower'] = df['country_other'].str.lower()
df['country_id_other_lower'] = df['country_id_other'].str.lower()
def _take_first_non_null_col(_df, _cols):
return _df[_cols].fillna(method='bfill', axis=1).iloc[:, 0]
def _consolidate_on(_df, col):
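# Join on the lower-cased name/id column and, where a match is found, replace the
# spelling in this frame with the covid_df spelling so the labels line up.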
_join_df = covid_countries.set_index(f'{col}_lower')
_df = _df.join(_join_df, on=f'{col}_other_lower')
_df['country_other'] = _take_first_non_null_col(_df, ['country', 'country_other'])
for c in _join_df.columns:
del _df[c]
return _df
df = _consolidate_on(df, 'country_id')
df = _consolidate_on(df, 'country')
df = df[df['country_other'].isin(covid_countries['country'])]
del df['country_id_other']
del df['country_other_lower']
del df['country_id_other_lower']
df = df.rename(columns={
'country_other': 'country'
})
return df
def get_google_mobility_df(covid_df):
url = 'https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv'
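# Read only the first row to get the column names, then build an explicit dtype
# map: every '*baseline' column is read as float, everything else as object.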
df = pd.read_csv(url, nrows=1)
dtypes = {col: 'float' if col.endswith('baseline') else 'object' for col in df.columns}
df = | pd.read_csv(url, dtype=dtypes) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 11:28:15 2021
@author: elinewby
"""
import biolqm
import numpy as np
import pandas as pd
import itertools
def calculateToandAwayControl(netName, subsets, wtSims = 10, subsetSims = 10, seed = 0, verbose = False, write = False, writePath = 'ControlVals.xlsx'):
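# Overview (inferred from the code below): load the logical model with bioLQM,
# group its attractors into types by Hamming distance, enumerate every 0/1
# assignment of each node subset as a perturbation, and use random walks on the
# perturbed model to estimate how well the subset drives the system "to" or
# "away from" each attractor.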
def writePerturbation(subsets):
perturbations = []
for subset in subsets:
nodes = [n.strip() for n in subset.split(',')]
for i in range(2**len(nodes)):
form = "{0:0"+str(len(nodes))+"b}"
pertVals = list(form.format(i))
perturbation = [nodes[j]+'%'+str(pertVals[j]) for j in range(len(nodes))]
if(not (perturbation in perturbations) and len(perturbation) == len(nodes)):
perturbations.append(perturbation)
return perturbations
def willWrite(path):
pd.set_option('display.max_colwidth', -1)
ResultFile = open(path, "w")
ResultFile.write(attDF.transpose().to_string())
ResultFile.write("\n\n\n")
ResultFile.write(resultHamming.transpose().to_string())
ResultFile.close()
lqm = biolqm.load(netName)
nodes = [n.toString() for n in lqm.getComponents()]
if(len(biolqm.fixpoints(lqm)) < len(biolqm.trapspaces(lqm))):
trap = biolqm.trapspace(lqm)
isComplex = True
else:
trap = biolqm.fixpoints(lqm)
isComplex = False
attDF = pd.DataFrame([s for s in trap])
atts = attDF.values.astype(float)
nodesResultOrder = list(attDF.columns)
nonFixed = {i:[] for i in range(len(attDF))}
if(isComplex):
for i in range(len(atts)):
for j in range(len(atts[i])):
if(atts[i][j] > 1):
nonFixed[i].append(nodesResultOrder[j])
atts[i][j] = 0.5
attTypes = {}
attTypes['Type 0'] = [0]
N = len(nodes)
numAtts = len(atts)
numTypes = 1
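# Attractors whose normalized Hamming distance to an existing type member is at
# most 0.15 are grouped into that type; otherwise a new type is started.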
for i in range(1,len(atts)):
typeFound = False
for j in range(numTypes):
attType = 'Type ' + str(j)
minDistance = 1
for k in range(len(attTypes[attType])):
aNum = attTypes[attType][k]
distance = np.sum(abs(atts[aNum] - atts[i]))/len(nodes)
if(distance < minDistance):
minDistance = distance
if(minDistance <= 0.15 and not typeFound):
attTypes[attType].append(i)
typeFound = True
if(not typeFound):
attTypes['Type ' + str(numTypes)] = [i]
numTypes += 1
if(numTypes == 1):
attTypes = {}
for i in range(len(atts)):
attTypes['Type ' + str(i)] = [i]
combos = list(itertools.combinations(attTypes.keys(),2))
diffs = np.zeros(len(combos))
for i in range(len(combos)):
type1 = combos[i][0]
type2 = combos[i][1]
att1 = atts[attTypes[type1][0]]
att2 = atts[attTypes[type2][0]]
diffs[i] = np.sum(abs(att1-att2))
#Test Subsets
pertNodes2 = [[]]
pertNodes2.extend(writePerturbation(subsets))
resultTableHamming = {}
resultDict = {}
np.random.seed(seed)
pertAttTargets = {}
goodTo = {}
goodAway = {}
for subset in subsets:
goodTo[subset] = False
goodAway[subset] = False
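# goodTo / goodAway track whether a subset can control the system: a perturbation
# consistent with at least one attractor can drive "to" it, one inconsistent with
# at least one attractor can drive "away"; consistency with every attractor makes
# it a "Bad Intervention".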
for perts in pertNodes2:
if(perts == []):
numSims = wtSims
else:
numSims = subsetSims
result = np.zeros(numAtts)
isBad = False
perturbation = ""
subset = ""
for a in perts:
node = a.split('%')[0]
subset += node+', '
perturbation += a
subset = subset[:-2]
pertAttTargets[perturbation] = []
for i in range(numAtts):
isTo = True
for p in perts:
node, val = p.split('%')
val = int(val)
if(attDF[node][i] != val):
isTo = False
pertAttTargets[perturbation].append(isTo)
if(sum(pertAttTargets[perturbation]) == numAtts):
isBad = True
if(sum(pertAttTargets[perturbation]) < numAtts):
goodAway[subset] = True
if(sum(pertAttTargets[perturbation]) > 0):
goodTo[subset] = True
if(len(perts) == 0):
isBad = False
if(isBad):
if(verbose):
print()
print(perts,"Bad Intervention")
resultTableHamming[perturbation] = []
names = []
for i in range(numAtts):
resultTableHamming[perturbation].append(np.round(result[i], decimals = 3))
if(i == numAtts):
names.append(" Middle")
else:
names.append(" Attractor " + str(i))
resultDict[perturbation] = [0,0]
continue
L = len(perts)
pertNodeIndex = []
if(verbose):
print("\nPerturbing ", perts,"\n")
pert = lqm
if(int(L)>=1):
for n in perts:
pert = biolqm.perturbation(pert, n)
pertNode = n.split('%')[0]
pertNodeIndex.append(nodesResultOrder.index(pertNode))
pertFps = biolqm.fixpoints(pert)
pertAtts2 = pd.DataFrame([s for s in pertFps])
pertAtts = pertAtts2.values
pertIsComplex = False
if(len(biolqm.fixpoints(pert)) < len(biolqm.trapspaces(pert))):
pertTrap = biolqm.trapspace(pert)
pertIsComplex = True
else:
pertTrap = biolqm.fixpoints(pert)
pertIsComplex = False
pertAtts2 = pd.DataFrame([s for s in pertTrap])
pertAtts = pertAtts2.values
pertNonFixed = {i:[] for i in range(len(pertAtts))}
if(pertIsComplex):
for i in range(len(pertAtts)):
for j in range(len(atts[0])):
if(pertAtts[i][j] > 1):
pertNonFixed[i].append(nodesResultOrder[j])
for i in range(numSims):
if(verbose):
if(numSims < 100):
print("{0:2.0f}".format(i/numSims*100),"%")
else:
if(i%(numSims/100)==0):
print("{0:2.0f}".format(i/numSims*100),"%")
initial1 = np.random.randint(0,2**(N-10))
initial2 = np.random.randint(0,2**10)
form = "{0:0"+str(N-10)+"b}"
initString1 = form.format(initial1)
initString2 = "{0:010b}".format(initial2)
initString = initString1+initString2
isDone = False
steps = 0
while(not isDone and steps < 20):
hamming = np.zeros(numAtts)
hammingPerts = np.zeros(len(pertAtts))
walk = biolqm.random(pert, "-i "+initString+" -m 50 -s "+str(seed))
Data = | pd.DataFrame([s for s in walk]) | pandas.DataFrame |
"""
summaries.py
ross spicer
functions for the creation of log and summary files from model results
"""
from pandas import DataFrame, read_csv, concat
import os
import numpy as np
from importlib import import_module
from constants import mmbtu_to_kWh, mmbtu_to_gal_HF
from constants import mmbtu_to_gal_LP, mmbtu_to_Mcf, mmbtu_to_cords
from aaem.components import comp_lib
from copy import deepcopy
def building_log(coms, res_dir):
"""
creates a log of the non-residential component building outputs by community
pre:
coms: the run model outputs: a dictionary
{<"community_name">:
{'model':<a run driver object>,
'output dir':<a path to the given communities outputs>
},
... repeated for each community
}
res_dir: directory to save the log in
post:
a csv file "non-residential_building_summary.csv" log is saved in res_dir
"""
out = []
#~ print 'NRB LOG'
for c in sorted(coms.keys()):
if c.find('+') != -1 or c.find("_intertie") != -1:
continue
try:
com = coms[c]['Non-residential Energy Efficiency']
#~ print coms[c]['community data'].get_section('Non-residential Energy Efficiency')
types = coms[c]['community data'].get_item('Non-residential Energy Efficiency',
"consumption estimates").index
estimates = deepcopy(com.comp_specs["building inventory"]).fillna(0)
estimates = estimates.set_index('Building Type')
estimates = estimates.astype(float)
#~ print estimates
num = 0
try:
if 'Average' in set(estimates.ix['Average'].index):
num = len(estimates.ix['Average'])
else:
num = 1
except KeyError:
pass
estimates = estimates.groupby(estimates.index).sum()
try:
estimates.ix["Unknown"] = estimates.ix["Average"]
estimates = estimates[estimates.index != "Average"]
except KeyError:
pass
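# Estimates are summed per building type; rows labelled "Average" are re-labelled
# "Unknown" before reporting.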
count = []
act = []
est = []
elec = []
hf = []
#~ print types
for t in types:
if t in ['Water & Sewer',]:
continue
try:
n = 0
sf_m = np.nan
sf_e = np.nan
elec_used = np.nan
hf_used = np.nan
if t == 'Average':
n = num
sf_e = estimates['Square Feet']['Unknown']
hf_used = \
estimates['Fuel Oil']['Unknown']/mmbtu_to_gal_HF + \
estimates['Natural Gas']['Unknown']/mmbtu_to_Mcf + \
estimates['Propane']['Unknown']/mmbtu_to_gal_LP + \
estimates['HW District']['Unknown']/mmbtu_to_cords
elec_used = estimates['Electric']['Unknown']/mmbtu_to_kWh
else:
n = com.buildings_df['count'][t]
sf_e = estimates['Square Feet'][t]
hf_used = \
estimates['Fuel Oil'][t]/mmbtu_to_gal_HF + \
estimates['Natural Gas'][t]/mmbtu_to_Mcf + \
estimates['Propane'][t]/mmbtu_to_gal_LP + \
estimates['HW District'][t]/mmbtu_to_gal_HF +\
estimates['Biomass'][t]/mmbtu_to_cords
#~ print hf_used
elec_used = estimates['Electric'][t]/mmbtu_to_kWh
sf_m = com.buildings_df['Square Feet'][t]
except KeyError as e:
#~ print e
pass
count.append(n)
act.append(sf_m)
est.append(sf_e)
elec.append(elec_used)
hf.append(hf_used)
percent = com.buildings_df['Square Feet'].sum() /\
estimates['Square Feet'].sum()
percent2 = float(com.buildings_df['count'].sum())/\
(com.buildings_df['count'].sum()+num)
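# percent -> '% sqft measured', percent2 -> '% buildings from inventory'
# (measured buildings vs. those estimated from the "Average" row).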
if np.isnan(percent):
percent = 0.0
if np.isnan(percent2):
percent2 = 0.0
name = c
if name == 'Barrow':
name = 'Utqiagvik (Barrow)'
out.append([name,percent*100,percent2*100]+ count+act+est+elec+hf)
except (KeyError,AttributeError, ZeroDivisionError)as e :
#~ print c +":"+ str(e)
pass
#~ print out
try:
l = [n for n in types if n not in ['Water & Sewer',]]
except UnboundLocalError:
return
c = []
e = []
m = []
ec = []
hf = []
for i in range(len(l)):
if l[i] == 'Average':
l[i] = 'Unknown'
c.append('number buildings')
m.append('square feet(measured)')
e.append('square feet(including estimates)')
ec.append("electricity used (mmbtu)")
hf.append("heating fuel used (mmbtu)")
data = DataFrame(out,columns = ['community',
'% sqft measured',
'% buildings from inventory'] + l + l + l + l + l
).set_index('community').round(2)
f_name = os.path.join(res_dir,'non-residential_building_summary.csv')
fd = open(f_name,'w')
fd.write(("# non residental building component building "
"summary by community\n"))
fd.write(",%,%," + str(c)[1:-1].replace(" '",'').replace("'",'') + "," + \
str(m)[1:-1].replace("' ",'').replace("'",'') + "," + \
str(e)[1:-1].replace("' ",'').replace("'",'') + "," +\
str(ec)[1:-1].replace("' ",'').replace("'",'') + "," +\
str(hf)[1:-1].replace("' ",'').replace("'",'') +'\n')
fd.close()
data.to_csv(f_name, mode='a')
def village_log (coms, res_dir):
"""
creates a log comparing the consumption and costs of the residential,
non-residential, and water/wastewater components
pre:
coms: the run model outputs: a dictionary
{<"community_name">:
{'model':<a run driver object>,
'output dir':<a path to the given communities outputs>
},
... repeated for each community
}
res_dir: directory to save the log in
post:
a csv file "village_sector_consumption_summary.csv"log is saved
in res_dir
"""
out = []
for c in sorted(coms.keys()):
if c.find('+') != -1 or c.find("_intertie") != -1:
continue
try:
start_year = coms[c]['community data'].get_item('community',
'current year')
#~ print coms[c]['forecast'].consumption.ix[start_year]['consumption']
consumption = \
int(coms[c]['forecast']\
.consumption.ix[start_year]['consumption'])
population = int(coms[c]['forecast'].population.ix[start_year])
try:
res = coms[c]['Residential Energy Efficiency']
res_con = [res.baseline_HF_consumption[0],
res.baseline_kWh_consumption[0] / mmbtu_to_kWh]
res_cost = [res.baseline_HF_cost[0], res.baseline_kWh_cost[0]]
except KeyError:
res_con = [np.nan, np.nan]
res_cost = [np.nan, np.nan]
try:
com = coms[c]['Non-residential Energy Efficiency']
com_con = [com.baseline_HF_consumption,
com.baseline_kWh_consumption / mmbtu_to_kWh]
com_cost = [com.baseline_HF_cost[0],com.baseline_kWh_cost[0]]
except KeyError:
com_con = [np.nan, np.nan]
com_cost = [np.nan, np.nan]
try:
ww = coms[c]['Water and Wastewater Efficiency']
ww_con = [ww.baseline_HF_consumption[0],
ww.baseline_kWh_consumption[0] / mmbtu_to_kWh ]
ww_cost = [ww.baseline_HF_cost[0],ww.baseline_kWh_cost[0]]
except KeyError:
ww_con = [np.nan, np.nan]
ww_cost = [np.nan, np.nan]
name = c
if name == 'Barrow':
name = 'Utqiagvik (Barrow)'
t = [name, consumption, population,
coms[c]['community data'].get_item('community','region')] +\
res_con + com_con + ww_con + res_cost + com_cost + ww_cost
out.append(t)
except AttributeError:
pass
start_year = 2017
data = DataFrame(out,columns = ['community', 'consumption year 1 (kWh)',
'Population', 'Region',
'Residential Heat (MMBTU)',
'Residential Electricity (MMBTU)',
'Non-Residential Heat (MMBTU)',
'Non-Residential Electricity (MMBTU)',
'Water/Wastewater Heat (MMBTU)',
'Water/Wastewater Electricity (MMBTU)',
'Residential Heat (cost ' + str(start_year)+')',
'Residential Electricity (cost ' + str(start_year)+')',
'Non-Residential Heat (cost ' + str(start_year)+')',
'Non-Residential Electricity (cost ' + str(start_year)+')',
'Water/Wastewater Heat (cost ' + str(start_year)+')',
'Water/Wastewater Electricity (cost ' + str(start_year)+')',
]
).set_index('community')
f_name = os.path.join(res_dir,'village_sector_consumption_summary.csv')
#~ fd = open(f_name,'w')
#~ fd.write("# summary of consumption and cost\n")
#~ fd.close()
data.to_csv(f_name, mode='w')
def fuel_oil_log (coms, res_dir):
"""
create a
"""
out = []
for c in sorted(coms.keys()):
if c+"_intertie" in coms.keys():
continue
try:
it = coms[c]['community data'].intertie
if it is None:
it = 'parent'
if c.find("_intertie") == -1:
res = coms[c]['Residential Energy Efficiency']
com = coms[c]['Non-residential Energy Efficiency']
wat = coms[c]['Water and Wastewater Efficiency']
else:
k = c.replace("_intertie","")
res = coms[k]['Residential Energy Efficiency']
com = coms[k]['Non-residential Energy Efficiency']
wat = coms[k]['Water and Wastewater Efficiency']
eff = coms[c]['community data'].get_item("community",
"diesel generation efficiency")
if eff == 0:
eff = np.nan
year = res.start_year
try:
elec = float(coms[c]['forecast'].generation[\
"generation diesel"][year]) / eff
except KeyError:
elec = 0
if it == 'child' or np.isnan(elec):
elec = 0
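# Utility diesel is back-calculated as diesel generation divided by generator
# efficiency; intertie children (or unknown generation) are counted as 0.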
res = res.baseline_fuel_Hoil_consumption[0]
com = com.baseline_fuel_Hoil_consumption
wat = wat.baseline_fuel_Hoil_consumption [0]
total = res + com + wat + elec
name = c
if name == 'Barrow':
name = 'Utqiagvik (Barrow)'
out.append([name,elec,res,com,wat,total])
except (KeyError,AttributeError) as e:
#~ print e
pass
data = DataFrame(out,columns = ['community','Utility diesel (gallons)',
'Residential Heating oil (gallons)',
'Non-residential Heating Oil (gallons)',
'Water/wastewater heating oil (gallons)',
'Total (gallons)']
).set_index('community').round(2)
f_name = os.path.join(res_dir,'fuel_oil_summary.csv')
#~ fd = open(f_name,'w')
#~ fd.write("# fuel_oil summary by community\n")
#~ fd.close()
data.to_csv(f_name, mode='w')
def forecast_comparison_log (coms, res_dir):
"""
creates a table of results for each community comparing the forecast
consumption results and the component consumption results
pre:
coms: the run model outputs: a dictionary
{<"community_name">:
{'model':<a run driver object>,
'output dir':<a path to the given communities outputs>
},
... repeated for each community
}
res_dir: directory to save the log in
post:
a csv file "forecast_component_consumption_comparison_summary.csv" log is saved in res_dir
"""
out = []
for c in sorted(coms.keys()):
try:
it = coms[c]['community data'].intertie
if it is None:
it = 'parent'
if it == 'child':
continue
try:
it_list = coms[c]['community data'].intertie_list
it_list = [c] + list(set(it_list).difference(["''"]))
except AttributeError:
it_list = [c]
#~ print it_list
res = coms[c]['Residential Energy Efficiency']
com = coms[c]['Non-residential Energy Efficiency']
wat = coms[c]['Water and Wastewater Efficiency']
fc = coms[c]['forecast']
first_year = max([res.start_year,
com.start_year,
wat.start_year,
fc.consumption.index[0]])
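# Compare in the first year covered by the residential, non-residential and
# water/wastewater components as well as the forecast.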
res_kwh = 0
com_kwh = 0
wat_kwh = 0
for ic in it_list:
try:
ires = coms[ic]['Residential Energy Efficiency']
icom = coms[ic]['Non-residential Energy Efficiency']
iwat = coms[ic]['Water and Wastewater Efficiency']
except KeyError:
continue
res_kwh += ires.baseline_kWh_consumption[first_year - ires.start_year]
com_kwh += icom.baseline_kWh_consumption
wat_kwh += iwat.baseline_kWh_consumption[first_year - iwat.start_year]
comp_res = float(res_kwh)
comp_wat = float(wat_kwh)
comp_com = float(com_kwh)
comp_non_res = float(com_kwh + wat_kwh)
comp_total = float(com_kwh + wat_kwh + res_kwh)
#~ print fc.consumption_to_save.ix[first_year]
#~ print ""
fc_res = float(fc.consumption.ix[first_year]\
['consumption residential'])
fc_non_res = float(fc.consumption.ix[first_year]\
['consumption non-residential'])
if np.isnan(fc_non_res):
fc_non_res = 0
fc_total = float(fc.consumption.ix[first_year]\
['consumption'])
if np.isnan(fc_total):
fc_total = 0
res_diff = fc_res - comp_res
non_res_diff = fc_non_res - comp_non_res
total_diff = fc_total - comp_total
res_per = (abs(res_diff)/ (fc_res + comp_res))*100.0
non_res_per = (abs(non_res_diff)/ (fc_non_res + comp_non_res))*100.0
total_per = (abs(total_diff)/ (fc_total + comp_total))*100.0
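# Note: the percent difference here is taken relative to the sum of the two
# values (trend line + modeled), not their mean.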
name = c
if name == 'Barrow':
name = 'Utqiagvik (Barrow)'
out.append([name,fc_res,comp_res,res_diff,res_per,
fc_non_res,comp_com,comp_wat,comp_non_res,
non_res_diff,non_res_per,
fc_total,comp_total,total_diff,total_per])
except (KeyError,AttributeError) as e:
#~ print e
pass
data = DataFrame(out,columns = \
['community',
'Forecast (trend line) Residential Consumption [kWh]',
'Forecast (modeled) Residential Consumption [kWh]',
'Difference Residential Consumption [kWh]',
'Percent Difference Residential Consumption [%]',
'Forecast (trend line) Non-Residential Consumption [kWh]',
'Forecast (modeled) Non-Residential (non-residential) Consumption [kWh]',
'Forecast (modeled) Non-Residential (water/wastewater) Consumption [kWh]',
'Forecast (modeled) Non-Residential Consumption [kWh]',
'Difference Non-Residential Consumption [kWh]',
'Percent Difference Non-Residential Consumption [%]',
'Forecast (trend line) Total Consumption [kWh]',
'Forecast (modeled) Total Consumption [kWh]',
'Difference Total Consumption [kWh]',
'Percent Difference Total Consumption [%]']
).set_index('community').round(2)
f_name = os.path.join(res_dir,
'forecast_component_consumption_comparison_summary.csv')
#~ fd = open(f_name,'w')
#~ fd.write(("# comparison of forecast kWh consumption vs."
#~ " component kWh consumption summary by community\n"))
#~ fd.close()
data.to_csv(f_name, mode='w')
def electric_price_summary (coms, res_dir):
"""
"""
out = None
for c in sorted(coms.keys()):
#~ print c
#~ print dir(coms[c]['community data'])
try:
if c.find('+') != -1:
continue
it = coms[c]['community data'].intertie
if it is None:
it = 'parent'
if it == 'child':
continue
base_cost = float(coms[c]['community data'].get_item("community",
"electric non-fuel price"))
prices = deepcopy(coms[c]['community data'].get_item("community",
"electric prices"))
name = c
if name == 'Barrow':
name = 'Utqiagvik (Barrow)'
prices[name] = prices[prices.columns[0]]#'price']
#~ del prices[prices.columns[0]]
prices = prices.T
prices["base cost"] = base_cost
if out is None:
out = prices
else:
out = concat([out,prices])
#~ print out
except (KeyError, TypeError) as e:
#~ print e
continue
if out is None:
return
f_name = os.path.join(res_dir,
'electric_prices_summary.csv')
#~ fd = open(f_name,'w')
#~ fd.write(("# list of the electricity prices forecasted\n"))
#~ fd.close()
out.index = [i.replace('_',' ') for i in out.index]
out = out.drop_duplicates()
out[[out.columns[-1]] + out.columns[:-1].tolist()].to_csv(f_name, mode='w')
def genterate_npv_summary (coms, res_dir):
"""
generate a log of the npv results
pre:
coms: the run model outputs: a dictionary
{<"community_name">:
{'model':<a run driver object>,
'output dir':<a path to the given communities outputs>
},
... repeated for each community
}
res_dir: directory to save the log in
post:
summary may be saved
"""
for community in coms:
#~ print community
components = coms[community]
npvs = []
for comp in components:
try:
npvs.append([comp,
components[comp].get_NPV_benefits(),
components[comp].get_NPV_costs(),
components[comp].get_NPV_net_benefit(),
components[comp].get_BC_ratio()
])
except AttributeError:
pass
name = community
if name == 'Barrow':
name = 'Utqiagvik (Barrow)'
f_name = os.path.join(res_dir,community.replace(' ','_'),
community.replace(' ','_') + '_npv_summary.csv')
cols = ['Component',
community +': NPV Benefits',
community +': NPV Cost',
community +': NPV Net Benefit',
community +': Benefit Cost Ratio']
npvs = DataFrame(npvs,
columns = cols).set_index('Component')
npvs.to_csv(f_name)
def community_forcast_summaries (community, components, forecast, res_dir):
"""generate community forecast summary
"""
forecast = deepcopy(forecast)
componetns = deepcopy(components)
#~ components = coms[community]
if community.find('+') != -1:
return
#~ print community
#### electricity
data = forecast.population
data['community'] = community
#~ print list(data.columns)[::-1]
data = data[['community', 'population']]
data['population_qualifier'] = 'I'
try:
forecast.consumption.columns
except AttributeError:
return
data[forecast.consumption.columns] =\
forecast.consumption
data['generation'] = forecast.generation['generation']
data.columns = ['community',
'population',
'population_qualifier',
'residential_electricity_consumed [kWh/year]',
'electricity_consumed/generation_qualifier',
'non-residential_electricity_consumed [kWh/year]',
'total_electricity_consumption [kWh/year]',
'total_electricity_generation [kWh/year]']
data = data[['community',
'population',
'population_qualifier',
'total_electricity_consumption [kWh/year]',
'residential_electricity_consumed [kWh/year]',
'non-residential_electricity_consumed [kWh/year]',
'total_electricity_generation [kWh/year]',
'electricity_consumed/generation_qualifier']]
f_name = os.path.join(res_dir,
community.replace(' ','_') + '_electricity_forecast.csv')
with open(f_name, 'w') as s:
s.write((
'# Electricity Forecast for ' + community + '\n'
'# Qualifier info: \n'
'# M indicates a measured value\n'
'# P indicates a projected value\n'
'# I indicates a value carried over from the input data.'
' May be projected or measured see input data metadata.\n'
))
data.to_csv(f_name, mode = 'a')
#### generation ####
data = forecast.generation
data['community'] = community
data['population'] = forecast.population['population']
data['population_qualifier'] = 'I'
data['generation_qualifier'] = \
forecast.\
consumption['consumption_qualifier']
data = data[list(data.columns[-4:]) + list(data.columns[:-4])]
#~ print data
data.columns = [
'community',
'population',
'population_qualifer',
'generation_qualifer',
'generation total (kWh/year)',
'generation diesel (kWh/year)',
'generation hydro (kWh/year)',
'generation natural gas (kWh/year)',
'generation wind (kWh/year)',
'generation solar (kWh/year)',
'generation biomass (kWh/year)'
]
f_name = os.path.join(res_dir,
community.replace(' ','_') + '_generation_forecast.csv')
with open(f_name, 'w') as s:
s.write((
'# generation Forecast for ' + community + '\n'
'# Qualifier info: \n'
'# M indicates a measured value\n'
'# P indicates a projected value\n'
'# I indicates a value carried over from the input data.'
' May be projected or measured see input data metadata.\n'
))
data.to_csv(f_name, mode = 'a')
ires = components['Residential Energy Efficiency']
icom = components['Non-residential Energy Efficiency']
iwat = components['Water and Wastewater Efficiency']
#### heat demand ####
data = | DataFrame(forecast.population['population']) | pandas.DataFrame |
import os
import time
import math
import json
import hashlib
import datetime
import pandas as pd
import numpy as np
from run_pyspark import PySparkMgr
graph_type = "loan_agent/"
def make_md5(x):
md5 = hashlib.md5()
md5.update(x.encode('utf-8'))
return md5.hexdigest()
def make_node_schema(entity_name, entity_df, comp_index_properties = None, mix_index_properties = None):
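# Builds a graph schema dict for one entity type: property keys are inferred from
# the dataframe column dtypes, a single vertex label is declared, and optional
# composite / mixed (search) indexes are added per property.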
properties = {"propertyKeys": []}
for col in entity_df.columns:
if entity_df[col].dtype == np.float:
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif entity_df[col].dtype == np.integer:
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
vertexLabels = {"vertexLabels": []}
vertexLabels["vertexLabels"].append({"name": entity_name})
vertexIndexes = {"vertexIndexes": []}
if comp_index_properties is not None:
for prop in comp_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_comp",
"propertyKeys" : [ prop ],
"composite" : True,
"unique" : False
})
if mix_index_properties is not None:
for prop in mix_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_mixed",
"propertyKeys" : [ prop ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_graph_label_mixed",
"propertyKeys" : [ "graph_label" ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
return {**properties, **vertexLabels, **vertexIndexes}
def make_node_mapper(entity_name, entity_df):
entity_file = "gra_" + entity_name + ".csv"
vertexMap = {"vertexMap": {entity_file: {}}}
vertexMap["vertexMap"][entity_file] = {
"[VertexLabel]" : entity_name
}
for col in entity_df.columns:
vertexMap["vertexMap"][entity_file][col] = col
return vertexMap
def make_vertex_centric_schema(edge_name, index_property, direction, order):
if direction not in ["BOTH", "IN", "OUT"]:
print("direction should be in {}".format(["BOTH", "IN", "OUT"]))
return None
if order not in ["incr", "decr"]:
print("order should be in {}".format(["incr", "decr"]))
return None
vertexCentricIndexes = {"vertexCentricIndexes": []}
vertexCentricIndexes["vertexIndexes"].append({
"name" : edge_name + "_" + index_property,
"edge" : edge_name,
"propertyKeys" : [ index_property ],
"order": order,
"direction": direction
})
return vertexCentricIndexes
def make_edge_schema(relation_df = None, relation_comp_index_properties = None, relation_mix_index_properties = None):
properties = {"propertyKeys": []}
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
if relation_df[col].dtype == np.float:
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif relation_df[col].dtype == np.integer:
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
relation_names = relation_df["Type"].value_counts().index.tolist()
edgeLabels = {"edgeLabels": []}
for relation in relation_names:
edgeLabels["edgeLabels"].append({
"name": relation,
"multiplicity": "MULTI",
"unidirected": False
})
edgeIndexes = {"edgeIndexes": []}
for relation_name in relation_names:
if relation_comp_index_properties is not None:
for prop in relation_comp_index_properties:
edgeIndexes["edgeIndexes"].append({
"name": relation_name + "_" + prop + "_comp",
"propertyKeys": [ prop ],
"composite": True,
"unique": False,
"indexOnly": relation_name
})
if relation_mix_index_properties is not None:
for prop in relation_mix_index_properties:
edgeIndexes["edgeIndexes"].append({
"name" : relation_name + "_" + prop + "_mixed",
"propertyKeys": [ prop ],
"composite": False,
"unique": False,
"mixedIndex": "search",
"indexOnly": relation_name
})
return {**properties, **edgeLabels, **edgeIndexes}
def make_edge_mapper(entity_relations, relation_df=None, specific_relation=None):
edgeMap = {"edgeMap": {}}
for relation_name, entity_pairs in entity_relations.items():
if specific_relation is not None and relation_name != specific_relation:
continue
for pair in entity_pairs:
relation_file = "gra_" + relation_name + ".csv"
edge = {"[edge_left]": {"Left": pair[0]},
"[EdgeLabel]": relation_name,
"[edge_right]": {"Right": pair[1]}}
if relation_df is not None:
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
edge[col] = col
edgeMap["edgeMap"][relation_file] = edge
return edgeMap
def dump_schema(schema, datamapper, folder):
if not os.path.exists(graph_type + folder):
os.makedirs(graph_type + folder)
f = open(graph_type + folder + "/schema.json", 'w')
f.write(json.dumps(schema))
f.close()
f = open(graph_type + folder + "/datamapper.json", 'w')
f.write(json.dumps(datamapper))
f.close()
spark_args = {}
pysparkmgr = PySparkMgr(spark_args)
_, spark, sc = pysparkmgr.start('xubin.xu')
# credit quota application table
apply_loan_df = spark.sql("select * from adm.adm_credit_apply_quota_doc").toPandas()
# loan drawdown (disbursement) table
zhiyong_loan_df = spark.sql("select * from adm.adm_credit_loan_apply_doc").toPandas()
zhiyong_loan_df.quota_apply_id = zhiyong_loan_df.quota_apply_id.astype("int")
# overdue table
overdue_sql = """select
*
from adm.adm_credit_apply_quota_doc t1
--overdue join: a customer can have several applications at different times, and each application can carry a different overdue status
--current overdue days and historical maximum overdue days
left join
(
select
quota_apply_id,
max(overdue_days_now) as overdue_days_now,
max(his_max_overdue_days) as his_max_overdue_days
from
(
select
c4.quota_apply_id,
c3.overdue_days_now,
c3.his_max_overdue_days
from
adm.adm_credit_loan_apply_doc c4
left join
(
select
c2.business_id,
max(overdue_days_now) as overdue_days_now,
max(overdue_day_calc) as his_max_overdue_days
from
(
select
c1.*,
(case when (overdue_day_calc>0 and latest_actual_repay_date is not null) then 0 else overdue_day_calc end) as overdue_days_now
FROM adm.adm_credit_rpt_risk_overdue_bill c1
) c2
group by c2.business_id
) c3
on c4.loan_no=c3.business_id
) c5
group by quota_apply_id
) t4
on t1.quota_apply_id=t4.quota_apply_id
--first-payment overdue days: current value and historical maximum----------------------------------------------------------
left join
(
select
quota_apply_id,
max(fpd) as fpd,
max(fpd_ever) as fpd_ever
from
(
select
a1.*,a2.*
from
adm.adm_credit_loan_apply_doc a1
left join
(
select
c1.business_id,
(case when (overdue_day_calc>0 and latest_actual_repay_date is null) then overdue_day_calc else 0 end) as fpd,--current first-payment overdue days
c1.overdue_day_calc as fpd_ever--historical first-payment overdue days
from
adm.adm_credit_rpt_risk_overdue_bill c1
where periods=1
) a2
on a1.loan_no=a2.business_id
) a3
group by quota_apply_id
) t5
on t1.quota_apply_id=t5.quota_apply_id"""
overday_df = spark.sql(overdue_sql).toPandas()
# Build the borrower entity
def make_borrower_entity():
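# Merges credit-limit applications with drawdowns, aggregates loan counts/amounts
# and outstanding balance per identity_no, attaches current and historical max
# overdue days, and derives a combined "tag" label for each borrower.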
shouxin_zhiyong_df = pd.merge(apply_loan_df, zhiyong_loan_df[
["quota_apply_id", "apply_id", "apply_status_risk", "loan_status", "loan_amount", "repayment_principal"]],
how='left', on='quota_apply_id')
borrower_basic_df = shouxin_zhiyong_df[
["name", "uus_id", "employee_no", "identity_no", "sex", "age", "zociac", "educate_level", "marital_status",
"city", "access_role", "entry_date",
"resign_date", "on_job_status", "current_working_days", "uc_job_level_name", "store_city", "apply_id",
"team_code", "shop_code", "area_code", "marketing_code", "region_code"]]
borrower = shouxin_zhiyong_df.groupby("identity_no")
borrower_ext_df = pd.DataFrame([], columns=["identity_no", "累计贷款笔数", "未结清贷款笔数", "累计贷款金额", "当前贷款余额"])
idx = 0
for group, df in borrower:
loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].apply_id.count()
unclosed_loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功") & (
df.loan_status == "REPAYING")].apply_id.count()
loans_amt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].loan_amount_y.sum()
unpayed_amt = loans_amt - df[
(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].repayment_principal.sum()
borrower_ext_df.loc[idx] = {"identity_no": group, "累计贷款笔数": loans_cnt, "未结清贷款笔数": unclosed_loans_cnt,
"累计贷款金额": loans_amt, "当前贷款余额": unpayed_amt}
idx += 1
borrower_basic_df.drop_duplicates(borrower_basic_df.columns, keep='first', inplace=True)
borrower_entity_df = pd.merge(borrower_basic_df, borrower_ext_df, on="identity_no")
borrower_entity_df = borrower_entity_df.fillna(0)
overday_gp = overday_df[(~pd.isnull(overday_df.overdue_days_now))].groupby("identity_no")["overdue_days_now"].max()
overday_now_df = pd.DataFrame({"identity_no": overday_gp.index, "overdue_days_now": overday_gp.values})
borrower_entity_df = pd.merge(borrower_entity_df, overday_now_df, how="left", on="identity_no")
his_overday_gp = overday_df[(~pd.isnull(overday_df.his_max_overdue_days))].groupby("identity_no")[
"his_max_overdue_days"].max()
his_overday_df = pd.DataFrame({"identity_no": his_overday_gp.index, "his_max_overdue_days": his_overday_gp.values})
borrower_entity_df = pd.merge(borrower_entity_df, his_overday_df, how="left", on="identity_no")
borrower_entity_df = borrower_entity_df.fillna(0)
borrower_entity_df["tag"] = ""
for idx in borrower_entity_df.index:
max_overday = borrower_entity_df.loc[idx, "overdue_days_now"]
his_max_overday = borrower_entity_df.loc[idx, "his_max_overdue_days"]
loan_amt = borrower_entity_df.loc[idx, "累计贷款金额"]
job_status = borrower_entity_df.loc[idx, "on_job_status"]
tag = borrower_entity_df.loc[idx, "tag"]
if his_max_overday > 90:
tag = tag + ",坏客户"
if max_overday > 30:
tag = tag + ",首逾30+"
if job_status == "离职":
tag = tag + ",离职"
if loan_amt > 0:
tag = tag + ",放款"
else:
tag = tag + ",未放款"
p = tag.find(",")
if p == 0:
tag = tag[1:]
borrower_entity_df.loc[idx, "tag"] = tag
borrower_entity_df.drop(["apply_id"], axis=1, inplace=True)
borrower_entity_df.drop_duplicates(borrower_entity_df.columns, inplace=True)
return borrower_entity_df
borrower_entity_df = make_borrower_entity()
borrower_entity_df.columns = ["姓名", "uus_id", "员工号", "身份证号", "性别", "年龄", "星座", "教育程度", "婚姻状态", "城市", "角色", "入职日期",
"离职日期",
"当前在职状态", "当前在职天数", "当前职级", "门店所在城市", "team_code", "shop_code", "area_code",
"marketing_code", "region_code",
"累计贷款笔数", "未结清贷款笔数", "累计贷款金额", "当前贷款余额", "当前逾期天数", "历史最大逾期天数", "tag"]
# Build the contact entity
def make_contact_entity():
contact_df = spark.sql("select * from credit_loan_api_service.personal_contact_info").toPandas()
contact_df = contact_df[contact_df.product_id == "ELOAN_AGENT"]
contact_df = contact_df[["contact_name", "contact_way", "contact_relationship", "uid"]]
contact_df.columns = ["姓名", "联系方式", "关系", "uid"]
contact_df.drop_duplicates(contact_df.columns, inplace=True)
return contact_df
contact_entity_df = make_contact_entity()
contact_entity_df["ext_id"] = contact_entity_df["姓名"] + contact_entity_df["联系方式"] + contact_entity_df["关系"] + \
contact_entity_df["uid"]
contact_entity_df.ext_id = contact_entity_df.ext_id.apply(lambda x: make_md5(x))
# Build the address entity
def make_address_entity():
address_df = spark.sql("select * from credit_loan_api_service.credit_personal_info").toPandas()
address_df = address_df[address_df.product_id == "ELOAN_AGENT"]
address_df = address_df[["address", "province", "city", "district", "uid"]]
address_df.columns = ["地址", "省份", "城市", "区", "uid"]
address_df.drop_duplicates(address_df.columns, inplace=True)
return address_df
address_entity_df = make_address_entity()
# Build the phone entity
def make_phone_entity():
phones_df = apply_loan_df[["uus_id", "telephone"]]
phones_df = pd.concat([phones_df, zhiyong_loan_df[["uus_id", "telephone"]]])
phones_df = pd.merge(borrower_entity_df[["uus_id"]], phones_df, how="left", on="uus_id")
phones_df = phones_df[~pd.isnull(phones_df.telephone)]
phones_df["tag"] = "借款人"
contact_phones_df = contact_entity_df[["uid", "联系方式"]]
contact_phones_df.rename(columns={"uid": "uus_id", "联系方式": "telephone"}, inplace=True)
contact_phones_df = contact_phones_df[~pd.isnull(contact_phones_df.telephone)]
contact_phones_df["tag"] = "联系人"
phones_df = pd.concat([phones_df, contact_phones_df])
phones_df.rename(columns={"telephone": "手机号"}, inplace=True)
phones_df.drop_duplicates(phones_df.columns, keep='first', inplace=True)
return phones_df
phones_entity_df = make_phone_entity()
# Build the team, shop, area, marketing and region entities
def build_teams(code):
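# Aggregates borrower-level figures per organisational code: number of funded
# borrowers, total funded amount, outstanding balance and bad-customer count.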
team_gp = borrower_entity_df.groupby(code)
team_df = pd.DataFrame([], columns=["编号", "名称", "放款总人数", "放款总金额", "当前总贷款余额", "总坏客户人数"])
idx = 0
for group, df in team_gp:
loan_cnt = df[df["累计贷款笔数"] > 0]["累计贷款笔数"].count()
loan_amt = df["累计贷款金额"].sum()
unpaid_amt = df["当前贷款余额"].sum()
bad_cnt = df[df.tag.str.contains("坏客户")]["身份证号"].count()
team_df.loc[idx] = {"编号": group, "名称": "", "放款总人数": loan_cnt, "放款总金额": loan_amt,
"当前总贷款余额": unpaid_amt, "总坏客户人数": bad_cnt}
idx += 1
team_df.drop_duplicates(team_df.columns, inplace=True)
return team_df
def make_shop_entity():
shop_df = build_teams("shop_code")
shop_df = shop_df[(shop_df["编号"].str.strip().str.len() > 0) & (shop_df["编号"]!=0)]
shop_address_df = spark.sql("select shop_id, shop_code, shop_name, address, city_name from spark_dw.dw_ke_bkjf_shh_house_shop_base_da").toPandas()
shop_df = pd.merge(shop_df, shop_address_df[["shop_code", "shop_name", "address", "city_name"]],
how = "left", left_on="编号", right_on="shop_code")
shop_df["名称"] = shop_df.shop_name
shop_df.drop(["shop_name", "shop_code"], axis=1, inplace=True)
shop_df.rename(columns={"address": "地址", "city_name": "城市"}, inplace=True)
shop_df.drop_duplicates(shop_df.columns, inplace=True)
return shop_df
def make_group_entity(group):
team_df = build_teams(group + "_code")
team_df = team_df[(team_df["编号"].str.strip().str.len() > 0) & (team_df["编号"]!=0)]
tmp_df = apply_loan_df[[group + "_code", group + "_name"]]
team_df = pd.merge(team_df, tmp_df, how="left", left_on="编号", right_on=group + "_code")
team_df["名称"] = team_df[group + "_name"]
team_df.drop([group + "_code", group + "_name"], axis=1, inplace=True)
team_df.drop_duplicates(team_df.columns, inplace=True)
return team_df
team_df = make_group_entity("team")
team_df['tag'] = np.where(team_df['总坏客户人数'] > 1, '高风险组', '正常组')
shop_entity_df = make_shop_entity()
shop_entity_df['tag'] = np.where(shop_entity_df['总坏客户人数'] > 2, '高风险门店', '正常门店')
area_df = make_group_entity("area")
marketing_df = make_group_entity("marketing")
region_df = make_group_entity("region")
# Build the device IP entity
def make_device_ip():
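# Device-fingerprint IP events: keep the earliest event per unique
# (ip, udid, union_id) combination.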
ip_df = spark.sql("""select ip, udid, union_id, event_time from credit_biz_metrics.device_fingerprint
where date(event_time)>=date('2020-08-24') and udid!='2408c710977177815f01fbc344dedc8b'""").toPandas()
ip_df.sort_values(by="event_time", inplace=True)
ip_df.drop_duplicates(list(set(ip_df.columns).difference({"event_time"})), keep='first', inplace=True)
return ip_df
ip_df = make_device_ip()
# Build the device entity
def make_device_entity():
device_df = spark.sql("""select udid, union_id, imei, idfa, meid, event_time from credit_biz_metrics.device_fingerprint
where date(event_time)>=date('2020-08-24') and udid!='2408c710977177815f01fbc344dedc8b'""").toPandas()
device_df.sort_values(by="event_time", inplace=True)
device_df.drop_duplicates(list(set(device_df.columns).difference({"event_time"})), keep='first', inplace=True)
return device_df
device_df = make_device_entity()
# Build the borrower-contact relation
def make_borrower_contact():
borrower_contact_df = pd.merge(borrower_entity_df[["uus_id"]], contact_entity_df, left_on="uus_id", right_on="uid")[["uus_id", "关系", "uid", "ext_id"]]
borrower_contact_df.rename(columns={"uus_id": "Left", "关系": "Type", "ext_id": "Right"}, inplace=True)
borrower_contact_df = borrower_contact_df[["Left", "Type", "Right"]]
borrower_contact_df.drop_duplicates(borrower_contact_df.columns, inplace=True)
return borrower_contact_df
borrower_contact_df = make_borrower_contact()
# Build the borrower-phone relation
def make_borrower_phones():
borrower_phones = phones_entity_df[phones_entity_df.tag == "借款人"]
borrower_phones.rename(columns={"uus_id": "Left", "手机号": "Right"}, inplace=True)
borrower_phones["Type"] = "借款人号码"
borrower_phones = borrower_phones[["Left", "Type", "Right"]]
borrower_phones.drop_duplicates(borrower_phones.columns, inplace=True)
return borrower_phones
borrower_phones_df = make_borrower_phones()
# Build the contact-phone relation
def make_contact_phones():
contact_phones = phones_entity_df[phones_entity_df.tag == "联系人"]
contact_phones.rename(columns={"uus_id": "Left", "手机号": "Right"}, inplace=True)
contact_phones["Type"] = "联系人号码"
contact_phones = contact_phones[["Left", "Type", "Right"]]
contact_phones.drop_duplicates(contact_phones.columns, inplace=True)
return contact_phones
contact_phones_df = make_contact_phones()
# Build the borrower-address relation
def make_borrower_address():
borrower_address = pd.merge(borrower_entity_df[["uus_id"]], address_entity_df["uid"], left_on="uus_id", right_on="uid")
borrower_address["Type"] = "居住"
borrower_address.rename(columns={"uus_id": "Left", "uid": "Right"}, inplace=True)
borrower_address = borrower_address[["Left", "Type", "Right"]]
borrower_address.drop_duplicates(borrower_address.columns, inplace=True)
return borrower_address
borrower_address_df = make_borrower_address()
# Build the borrower-team relation
def make_borrower_team():
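# Links each borrower to the team of their earliest successful drawdown; borrowers
# with applications but no drawdown fall back to the team code on the application.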
tmp_gp = zhiyong_loan_df.groupby(["identity_no", "team_code"])
borrower_team = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])
idx = 0
for group, df in tmp_gp:
loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk=="放款成功")]
if loans.shape[0] == 0:
borrower_team.loc[idx] = {"Left": group[0], "Type": "所属团队", "Right": group[1], "放款时间": "", "放款状态": df.apply_status_risk.values[0]}
idx += 1
continue
min_loan_time = loans.loan_success_time.min()
team_code = loans[loans.loan_success_time == min_loan_time].team_code.values[0]
borrower_team.loc[idx] = {"Left": group[0], "Type": "所属团队", "Right": team_code, "放款时间": min_loan_time, "放款状态": "放款成功"}
idx += 1
borrower_team.drop_duplicates(borrower_team.columns, keep='first', inplace=True)
apply_no_zhiyong = pd.merge(borrower_entity_df[["身份证号", "team_code"]], borrower_team["Left"], how="left", left_on="身份证号", right_on="Left")
apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]
apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)
apply_no_zhiyong.drop(["Left"], axis=1, inplace=True)
apply_no_zhiyong.rename(columns={"身份证号": "Left", "team_code": "Right"}, inplace=True)
apply_no_zhiyong["Type"] = "所属团队"
apply_no_zhiyong["放款时间"] = ""
apply_no_zhiyong["放款状态"] = "未支用"
apply_no_zhiyong = apply_no_zhiyong[["Left", "Type", "Right", "放款时间", "放款状态"]]
return pd.concat([borrower_team, apply_no_zhiyong])
borrower_team = make_borrower_team()
# Build the team-shop relation
def make_team_shop():
tmp_gp = zhiyong_loan_df.groupby(["team_code", "shop_code"])
team_shop = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])
idx = 0
for group, df in tmp_gp:
if any(pd.isnull(group)):  # group is a (team_code, shop_code) tuple, so check element-wise
continue
loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk=="放款成功")]
if loans.shape[0] == 0:
team_shop.loc[idx] = {"Left": group[0], "Type": "所属门店", "Right": group[1], "放款时间": "", "放款状态": ",".join(df.apply_status_risk.unique())}
idx += 1
continue
min_loan_time = loans.loan_success_time.min()
shop_code = loans[loans.loan_success_time == min_loan_time].shop_code.values[0]
team_shop.loc[idx] = {"Left": group[0], "Type": "所属门店", "Right": shop_code, "放款时间": min_loan_time, "放款状态": "放款成功"}
idx += 1
tmp_df = pd.merge(team_df, borrower_entity_df[['team_code', 'shop_code']], how="left", left_on="编号", right_on="team_code")
tmp_df.drop_duplicates(tmp_df.columns, inplace=True)
apply_no_zhiyong = pd.merge(tmp_df[["编号", 'shop_code']], team_shop["Left"], how="left", left_on="编号", right_on="Left")
apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]
apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)
apply_no_zhiyong.drop(["Left"], axis=1, inplace=True)
apply_no_zhiyong.rename(columns={"编号": "Left", "shop_code": "Right"}, inplace=True)
apply_no_zhiyong["Type"] = "所属门店"
apply_no_zhiyong["放款时间"] = ""
apply_no_zhiyong["放款状态"] = "未支用"
apply_no_zhiyong = apply_no_zhiyong[["Left", "Type", "Right", "放款时间", "放款状态"]]
return pd.concat([team_shop, apply_no_zhiyong])
team_shop = make_team_shop()
# Build the shop-area relation
def make_shop_area():
tmp_gp = zhiyong_loan_df.groupby(["shop_code", "area_code"])
shop_area = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])
idx = 0
for group, df in tmp_gp:
if any(pd.isnull(group)):  # group is a (shop_code, area_code) tuple, so check element-wise
continue
loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk=="放款成功")]
if loans.shape[0] == 0:
shop_area.loc[idx] = {"Left": group[0], "Type": "所属区域", "Right": group[1], "放款时间": "", "放款状态": ",".join(df.apply_status_risk.unique())}
idx += 1
continue
min_loan_time = loans.loan_success_time.min()
area_code = loans[loans.loan_success_time == min_loan_time].area_code.values[0]
shop_area.loc[idx] = {"Left": group[0], "Type": "所属区域", "Right": area_code, "放款时间": min_loan_time, "放款状态": "放款成功"}
idx += 1
tmp_df = pd.merge(shop_entity_df, borrower_entity_df[['shop_code','area_code']], how="left", left_on="编号", right_on="shop_code")
tmp_df.drop_duplicates(tmp_df.columns, inplace=True)
apply_no_zhiyong = pd.merge(tmp_df[["编号", 'area_code']], shop_area["Left"], how="left", left_on="编号", right_on="Left")
apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]
apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)
apply_no_zhiyong.drop(["Left"], axis=1, inplace=True)
apply_no_zhiyong.rename(columns={"编号": "Left", "area_code": "Right"}, inplace=True)
apply_no_zhiyong["Type"] = "所属区域"
apply_no_zhiyong["放款时间"] = ""
apply_no_zhiyong["放款状态"] = "未支用"
apply_no_zhiyong = apply_no_zhiyong[["Left", "Type", "Right", "放款时间", "放款状态"]]
return pd.concat([shop_area, apply_no_zhiyong])
shop_area = make_shop_area()
# Build the area-marketing relation
def make_area_marketing():
tmp_gp = zhiyong_loan_df.groupby(["area_code", "marketing_code"])
area_marketing = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])
idx = 0
for group, df in tmp_gp:
if any(pd.isnull(group)):  # group is an (area_code, marketing_code) tuple, so check element-wise
continue
loans = df[(~ | pd.isnull(df.apply_id) | pandas.isnull |
#Test allometry
import os
import sys
sys.path.append(os.path.dirname(os.getcwd()))
import glob
import pandas as pd
from analysis import allometry
from analysis.check_site import get_site, get_year
def test_run():
shps = glob.glob("data/*.shp")
#Get site names
df = | pd.DataFrame({"path":shps}) | pandas.DataFrame |
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep="last")
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep="last")
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize(
"freq",
[
"A",
"2A",
"-2A",
"Q",
"-1Q",
"M",
"-1M",
"D",
"3D",
"-3D",
"W",
"-1W",
"H",
"2H",
"-2H",
"T",
"2T",
"S",
"-3S",
],
)
def test_infer_freq(self, freq):
# GH 11018
idx = pd.date_range("2011-01-01 09:00:00", freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(["2011-01-01", "NaT"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz="US/Pacific")
| tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) | pandas.util.testing.assert_numpy_array_equal |
#!/usr/bin/env python
"""Module containing functions for converting messages to dataframe."""
import collections
import datetime
import stat
from typing import Text, Sequence, List, Any, Dict, Optional
import pandas as pd
from google.protobuf import descriptor
from google.protobuf import message
from grr_response_proto import osquery_pb2
from grr_response_proto import semantic_pb2
def from_sequence(seq: Sequence[Any]) -> pd.DataFrame:
"""Converts sequence of objects to a dataframe.
Args:
seq: Sequence of objects to convert.
Returns:
Pandas dataframe representing given sequence of objects.
"""
dframes = [from_object(obj) for obj in seq]
if not dframes:
return pd.DataFrame()
return | pd.concat(dframes, ignore_index=True, sort=False) | pandas.concat |
#coding:utf-8
import pandas as pd
import numpy as np
# Read the personal information
train_agg = pd.read_csv('../data/train_agg.csv',sep='\t')
test_agg = pd.read_csv('../data/test_agg.csv',sep='\t')
agg = pd.concat([train_agg,test_agg],copy=False)
# Log information
train_log = pd.read_csv('../data/train_log.csv',sep='\t')
test_log = pd.read_csv('../data/test_log.csv',sep='\t')
log = pd.concat([train_log,test_log],copy=False)
log['EVT_LBL_1'] = log['EVT_LBL'].apply(lambda x:x.split('-')[0])
log['EVT_LBL_2'] = log['EVT_LBL'].apply(lambda x:x.split('-')[1])
log['EVT_LBL_3'] = log['EVT_LBL'].apply(lambda x:x.split('-')[2])
# Unique user identifiers
train_flg = | pd.read_csv('../data/train_flg.csv',sep='\t') | pandas.read_csv |
import datetime as dt
from functools import partial
from io import BytesIO, StringIO
from fastapi import HTTPException
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather
import pytest
from solarperformanceinsight_api import utils, models
httpfail = partial(
pytest.param, marks=pytest.mark.xfail(strict=True, raises=HTTPException)
)
@pytest.mark.parametrize(
"inp,typ,exp",
(
(
"time,datas\n2020-01-01T00:00Z,8.9",
StringIO,
pd.DataFrame({"time": [pd.Timestamp("2020-01-01T00:00Z")], "datas": [8.9]}),
),
(
b"time,datas\n2020-01-01T00:00Z,8.9",
BytesIO,
pd.DataFrame({"time": [pd.Timestamp("2020-01-01T00:00Z")], "datas": [8.9]}),
),
(
b"time,datas\n2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
pd.DataFrame(
{
"time": [
pd.Timestamp("2020-01-01T00:00"),
pd.Timestamp("2020-01-02T00:00"),
],
"datas": [8.9, None],
}
),
),
# not valid later, but rely on dataframe validation to check dtypes
(
b"multi,header\ntime,datas\n2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
pd.DataFrame(
{
"multi": ["time", "2020-01-01T00:00", "2020-01-02T00:00"],
"header": ["datas", "8.9", np.nan],
}
),
),
# no header row
httpfail(
b"2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
None,
),
httpfail(
"",
StringIO,
None,
),
httpfail(
"empty",
StringIO,
None,
),
httpfail(
"notenoughheaders,\na,b",
StringIO,
None,
),
httpfail(
"a,b\n0,1,2\n0,1,3,4,5,6",
StringIO,
None,
),
),
)
def test_read_csv(inp, typ, exp):
out = utils.read_csv(typ(inp))
pd.testing.assert_frame_equal(out, exp)
@pytest.mark.parametrize(
"tbl,exp",
(
(
pa.Table.from_arrays([[1.0, 2, 3], [4.0, 5, 6]], ["a", "b"]),
pd.DataFrame({"a": [1, 2, 3.0], "b": [4, 5, 6.0]}),
),
# complex types to test to_pandas
(
pa.Table.from_arrays(
[pa.array([1.0, 2, 3]), pa.array([[], [5, 6], [7, 8]])], ["a", "b"]
),
pd.DataFrame({"a": [1, 2, 3.0], "b": [[], [5, 6], [7, 8]]}),
),
httpfail(
b"notanarrowfile",
None,
),
),
)
def test_read_arrow(tbl, exp):
if isinstance(tbl, bytes):
tblbytes = BytesIO(tbl)
else:
tblbytes = BytesIO(utils.dump_arrow_bytes(tbl))
out = utils.read_arrow(tblbytes)
pd.testing.assert_frame_equal(out, exp)
@pytest.mark.parametrize(
"inp,exp",
(
("text/csv", utils.read_csv),
("application/vnd.ms-excel", utils.read_csv),
("application/vnd.apache.arrow.file", utils.read_arrow),
("application/octet-stream", utils.read_arrow),
httpfail("application/json", None),
),
)
def test_verify_content_type(inp, exp):
out = utils.verify_content_type(inp)
assert out == exp
@pytest.mark.parametrize(
"inp,cols,exp",
(
(pd.DataFrame({"a": [0, 1], "b": [1, 2]}), ["a", "b"], set()),
(
pd.DataFrame(
{"time": [pd.Timestamp("2020-01-01")], "b": [0.8], "c": ["notnumeric"]}
),
["time", "b"],
{"c"},
),
httpfail(
pd.DataFrame({"time": [pd.Timestamp("2020-01-01")], "b": ["willfail"]}),
["time", "b"],
set(),
),
httpfail(pd.DataFrame({"a": [0, 1], "b": [1, 2]}), ["c"], {"a", "b"}),
httpfail(pd.DataFrame({"time": [0, 1], "b": [1, 2]}), ["time", "b"], set()),
(
pd.DataFrame(
{
"time": [
pd.Timestamp.now(),
pd.Timestamp("2020-01-01T00:00:01.09230"),
],
"b": [1, 2],
}
),
["time", "b"],
set(),
),
httpfail(
pd.DataFrame(
{
"time": [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-01")],
"b": [0.8, 1],
},
),
["time", "b"],
set(),
),
(
pd.DataFrame(
{
"month": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [f"{i}." for i in range(1, 13)],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [str(i) for i in range(1, 13)],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [f"{i}.0" for i in range(1, 13)],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
],
"other": list(range(12)),
}
),
["month"],
{"other"},
),
(
pd.DataFrame(
{
"month": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
}
),
["month"],
set(),
),
(
pd.DataFrame(
{
"month": [
"jan.",
"feb.",
"mar.",
"apr.",
"may",
"jun.",
"jul.",
"aug.",
"sep.",
"oct.",
"nov.",
"dec.",
],
"other": list(range(12)),
}
),
["month"],
{"other"},
),
(
pd.DataFrame(
{
"month": [
"January",
"february",
"march",
"april",
"may",
"june",
"july",
"August",
"september",
"October",
"November",
"December",
],
}
),
["month"],
set(),
),
httpfail(
pd.DataFrame(
{
"month": [
"October",
"November",
"December",
],
}
),
["month"],
set(),
),
httpfail(
pd.DataFrame({"month": range(0, 13)}),
["month"],
set(),
),
httpfail(
pd.DataFrame(
{
"month": [
"January",
"february",
"march",
"april",
"may",
"june",
"julio", # bad
"August",
"september",
"October",
"November",
"December",
],
}
),
["month"],
set(),
),
),
)
def test_validate_dataframe(inp, cols, exp):
out = utils.validate_dataframe(inp, cols)
assert out == exp
@pytest.mark.parametrize(
"inp,slc",
(
(
pd.DataFrame(
{
"month": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [str(i) for i in range(1, 13)],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"jan.",
"feb.",
"mar.",
"apr.",
"may",
"jun.",
"jul.",
"aug.",
"sep.",
"oct.",
"nov.",
"dec.",
],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"January",
"february",
"march",
"april",
"may",
"june",
"july",
"August",
"september",
"October",
"November",
"December",
],
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"October",
"November",
"December",
],
},
index=[9, 10, 11],
),
slice(9, None),
),
),
)
def test_standardize_months(inp, slc):
exp = pd.Series(
[
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
name="month",
)
out = utils.standardize_months(inp)["month"]
pd.testing.assert_series_equal(out, exp[slc])
def test_standardize_months_fail():
out0 = utils.standardize_months(pd.DataFrame({"month": range(0, 13)}))["month"]
assert not pd.isna(out0[1:]).any()
assert pd.isna(out0[:1]).all()
out1 = utils.standardize_months(
pd.DataFrame(
{
"month": [
"January",
"february",
"march",
"april",
"may",
"june",
"julio", # bad
"August",
"september",
"October",
"November",
"December",
],
}
)
)
pd.testing.assert_series_equal(
out1["month"],
pd.Series(
[
"January",
"February",
"March",
"April",
"May",
"June",
None,
"August",
"September",
"October",
"November",
"December",
],
name="month",
),
)
@pytest.mark.parametrize(
"df,tbl",
(
(
pd.DataFrame({"a": [0.1, 0.2]}, dtype="float64"),
pa.Table.from_arrays(
[pa.array([0.1, 0.2], type=pa.float32())], names=["a"]
),
),
(
pd.DataFrame({"a": [0.1, 0.2]}, dtype="float32"),
pa.Table.from_arrays(
[pa.array([0.1, 0.2], type=pa.float32())], names=["a"]
),
),
(
pd.DataFrame(
{
"a": [0.1, 0.2],
"time": [
pd.Timestamp("2020-01-01T00:00Z"),
pd.Timestamp("2020-01-02T00:00Z"),
],
},
),
pa.Table.from_arrays(
[
pa.array([0.1, 0.2], type=pa.float32()),
pa.array(
[
dt.datetime(2020, 1, 1, tzinfo=dt.timezone.utc),
dt.datetime(2020, 1, 2, tzinfo=dt.timezone.utc),
],
type=pa.timestamp("s", tz="UTC"),
),
],
names=["a", "time"],
),
),
(
pd.DataFrame(
{
"b": [-999, 129],
"time": [
pd.Timestamp("2020-01-01T00:00Z"),
pd.Timestamp("2020-01-02T00:00Z"),
],
"a": [0.1, 0.2],
},
),
pa.Table.from_arrays(
[
pa.array([-999, 129], type=pa.int64()),
pa.array(
[
dt.datetime(2020, 1, 1, tzinfo=dt.timezone.utc),
dt.datetime(2020, 1, 2, tzinfo=dt.timezone.utc),
],
type=pa.timestamp("s", tz="UTC"),
),
pa.array([0.1, 0.2], type=pa.float32()),
],
names=["b", "time", "a"],
),
),
(
pd.DataFrame(
{"a": [0.1, 0.2], "time": ["one", "two"]},
),
pa.Table.from_arrays(
[
pa.array([0.1, 0.2], type=pa.float32()),
pa.array(["one", "two"]),
],
names=["a", "time"],
),
),
# non-localized ok
(
pd.DataFrame(
{
"b": [-999, 129],
"time": [
pd.Timestamp("2020-01-01T00:00"),
pd.Timestamp("2020-01-02T00:00"),
],
"a": [0.1, 0.2],
},
),
pa.Table.from_arrays(
[
pa.array([-999, 129], type=pa.int64()),
pa.array(
[
dt.datetime(2020, 1, 1),
dt.datetime(2020, 1, 2),
],
type=pa.timestamp("s"),
),
pa.array([0.1, 0.2], type=pa.float32()),
],
names=["b", "time", "a"],
),
),
(
pd.DataFrame(
{"nanfloat": [None, 1.0], "nans": [pd.NA, pd.NA], "str": ["a", "b"]}
),
pa.Table.from_arrays(
[
pa.array([None, 1.0], type=pa.float32()),
pa.array([None, None], type=pa.null()),
pa.array(["a", "b"], type=pa.string()),
],
names=["nanfloat", "nans", "str"],
),
),
httpfail(
pd.DataFrame(
{
"nanint": [pd.NA, 3], # arrow doesn't like this
}
),
None,
),
httpfail(
pd.DataFrame(
{
"nanstr": [pd.NA, "string"],
}
),
None,
),
),
)
def test_convert_to_arrow(df, tbl):
out = utils.convert_to_arrow(df)
assert out == tbl
@pytest.mark.parametrize(
"df",
(
pd.DataFrame(),
pd.DataFrame({"a": [0, 1992.9]}),
pd.DataFrame(
{
"b": [-999, 129],
"time": [
pd.Timestamp("2020-01-01T00:00"),
pd.Timestamp("2020-01-02T00:00"),
],
"a": [0.1, 0.2],
},
),
pd.DataFrame(
{
"b": [-999, 129],
"time": [
pd.Timestamp("2020-01-01T00:00Z"),
| pd.Timestamp("2020-01-02T00:00Z") | pandas.Timestamp |
import datetime
import json
import mmh3
import os
import sys
import time
from uuid import uuid4
import findspark
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql import SparkSession
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from objects.task import Task
from objects.task_state import TaskState
from objects.workflow import Workflow
from objects.resource import Resource
from objects.workload import Workload
USAGE = 'Usage: python(3) ./two_sigma_traces_to_parquet_with_spark.py path_to_dir'
NAME = 'Two_Sigma'
TARGET_DIR = os.path.join(os.path.dirname(os.getcwd()), 'output_parquet', NAME)
def parse(path_to_dir):
global TARGET_DIR
TARGET_DIR = os.path.join(TARGET_DIR, os.path.split(path_to_dir)[1])
if 'DAS5' in os.environ: # If we want to execute it on the DAS-5 super computer
print("We are on DAS5, {0} is master.".format(os.environ['HOSTNAME'] + ".ib.cluster"))
spark = SparkSession.builder \
.master("spark://" + os.environ['HOSTNAME'] + ".ib.cluster:7077") \
.appName("WTA parser") \
.config("spark.executor.memory", "28G") \
.config("spark.executor.cores", "8") \
.config("spark.executor.instances", "10") \
.config("spark.driver.memory", "40G") \
.config("spark.sql.execution.arrow.enabled", "true") \
.getOrCreate()
else:
findspark.init(spark_home="<path to spark>")
spark = SparkSession.builder \
.master("local[8]") \
.appName("WTA parser") \
.config("spark.executor.memory", "20G") \
.config("spark.driver.memory", "8G") \
.getOrCreate()
if not os.path.exists(os.path.join(TARGET_DIR, Task.output_path())):
print("######\nStart parsing Tasks\n######")
task_df = spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load(
os.path.join(path_to_dir, '*.csv.processed'))
        # Drop the 'pref' column to save memory, and filter out unsuccessful jobs as their information is not reliable
task_df = task_df.drop('pref').filter(task_df.status == ":instance.status/success").drop('status').cache()
@F.pandas_udf(T.LongType(), F.PandasUDFType.SCALAR)
def sub_two_datetimes(s1, s2):
arr = []
for i in s1.keys():
d1 = datetime.datetime.strptime(s1[i], '%a %b %d %H:%M:%S %Z %Y')
d2 = datetime.datetime.strptime(s2[i], '%a %b %d %H:%M:%S %Z %Y')
arr.append(int((d2 - d1).total_seconds() * 1000))
return pd.Series(arr)
task_df = task_df \
.withColumn('wait_time', sub_two_datetimes(F.col('submit-time'), F.col('start-time'))) \
.withColumn('runtime', sub_two_datetimes(F.col('start-time'), F.col('end-time')))
@F.pandas_udf(T.LongType(), F.PandasUDFType.SCALAR)
def date_time_to_unix(series):
arr = []
epoch = datetime.datetime.utcfromtimestamp(0)
for i in series.keys():
arr.append(
np.int64(
(datetime.datetime.strptime(series[i], '%a %b %d %H:%M:%S %Z %Y') - epoch).total_seconds() * 1000)
)
return pd.Series(arr)
task_df = task_df.withColumn('submit-time', date_time_to_unix(F.col('submit-time'))).withColumnRenamed(
'submit-time', "ts_submit").drop(
'start-time').drop('end-time').cache()
min_ts = task_df.agg({"ts_submit": "min"}).collect()[0][0]
task_df = task_df.withColumn('ts_submit', F.col('ts_submit') - F.lit(min_ts))
@F.pandas_udf(T.DoubleType(), F.PandasUDFType.SCALAR)
def convert_to_kb(v):
return v * 1024
task_df = task_df.withColumn('memory', convert_to_kb(task_df.memory)).withColumnRenamed("memory",
"memory_consumption")
@F.pandas_udf(T.IntegerType(), F.PandasUDFType.SCALAR)
def string_to_int(v):
arr = []
for i in v.keys():
arr.append(mmh3.hash(v[i], signed=True))
return pd.Series(arr)
@F.pandas_udf(T.LongType(), F.PandasUDFType.SCALAR)
def string_to_long(v):
arr = []
for i in v.keys():
arr.append(mmh3.hash64(v[i], signed=True)[0])
return pd.Series(arr)
@F.pandas_udf(T.LongType(), F.PandasUDFType.SCALAR)
def assign_workflow_ids(v):
arr = []
for i in v.keys():
if v[i]:
arr.append(mmh3.hash64(v[i], signed=True)[0])
else:
arr.append(mmh3.hash64(uuid4().bytes, signed=True)[0]) # Assign a UUID, collision chance is negligible.
return | pd.Series(arr) | pandas.Series |
from itertools import product
from string import ascii_uppercase
import pandas as pd
from pandas.tseries.offsets import MonthBegin
from .futures import CMES_CODE_TO_MONTH
def make_rotating_equity_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime,
exchange='TEST'):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
        Create a new asset every `frequency` * `periods_between_starts`.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': exchange,
},
index=range(num_assets),
)
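# Illustrative usage sketch (added for clarity; not part of the original testing helpers).
# All sids, dates and parameter values below are invented for demonstration only.
def _example_make_rotating_equity_info():
    """Hypothetical example: five assets, a new one starting every 2 days, each alive 30 days."""
    return make_rotating_equity_info(
        num_assets=5,
        first_start=pd.Timestamp('2015-01-01'),
        frequency=pd.Timedelta(days=1),
        periods_between_starts=2,
        asset_lifetime=30,
    )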
def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None,
exchange='TEST'):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp, optional
end_date : pd.Timestamp, optional
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': exchange,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
)
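# Illustrative usage sketch (added for clarity; not part of the original testing helpers).
# The sids and dates are made up; any valid timestamps would work.
def _example_make_simple_equity_info():
    """Hypothetical example: three assets that exist for all of 2014."""
    return make_simple_equity_info(
        sids=[1, 2, 3],
        start_date=pd.Timestamp('2014-01-01'),
        end_date=pd.Timestamp('2014-12-31'),
    )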
def make_simple_multi_country_equity_info(countries_to_sids,
countries_to_exchanges,
start_date,
end_date):
"""Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`, from multiple countries.
"""
sids = []
symbols = []
exchanges = []
for country, country_sids in countries_to_sids.items():
exchange = countries_to_exchanges[country]
for i, sid in enumerate(country_sids):
sids.append(sid)
symbols.append('-'.join([country, str(i)]))
exchanges.append(exchange)
return pd.DataFrame(
{
'symbol': symbols,
'start_date': start_date,
'end_date': end_date,
'asset_name': symbols,
'exchange': exchanges,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
)
def make_jagged_equity_info(num_assets,
start_date,
first_end,
frequency,
periods_between_ends,
auto_close_delta):
"""
Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
periods_between_ends : int
Starting after the first end date, end each asset every
`frequency` * `periods_between_ends`.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
frame = pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'start_date': start_date,
'end_date': pd.date_range(
first_end,
freq=(periods_between_ends * frequency),
periods=num_assets,
),
'exchange': 'TEST',
},
index=range(num_assets),
)
# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
frame['auto_close_date'] = frame['end_date'] + auto_close_delta
return frame
def make_future_info(first_sid,
root_symbols,
years,
notice_date_func,
expiration_date_func,
start_date_func,
month_codes=None,
multiplier=500):
"""
Create a DataFrame representing futures for `root_symbols` during `year`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
start_date_func : (Timestamp) -> Timestamp, optional
Function to generate start dates from first of the month associated
with each asset month code. Defaults to a start_date one year prior
to the month_code date.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CMES_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter.
"""
if month_codes is None:
month_codes = CMES_CODE_TO_MONTH
year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
# Pairs of string/date like ('K06', 2006-05-01) sorted by year/month
# `MonthBegin(month_num - 1)` since the year already starts at month 1.
contract_suffix_to_beginning_of_month = tuple(
(month_code + year_str[-2:], year + MonthBegin(month_num - 1))
for ((year, year_str), (month_code, month_num))
in product(
zip(years, year_strs),
sorted(list(month_codes.items()), key=lambda item: item[1]),
)
)
contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
contracts.append({
'sid': sid,
'root_symbol': root_sym,
'symbol': root_sym + suffix,
'start_date': start_date_func(month_begin),
'notice_date': notice_date_func(month_begin),
'expiration_date': expiration_date_func(month_begin),
'multiplier': multiplier,
'exchange': "TEST",
})
return pd.DataFrame.from_records(contracts, index='sid')
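# Illustrative usage sketch (added for clarity; not part of the original testing helpers).
# The root symbols, month codes and date offsets below are arbitrary assumptions chosen
# only to show how the date-generating callables plug into make_future_info.
def _example_make_future_info():
    """Hypothetical example: quarterly contracts for two root symbols in 2016."""
    return make_future_info(
        first_sid=100,
        root_symbols=['CL', 'FV'],
        years=[2016],
        notice_date_func=lambda month_begin: month_begin - pd.Timedelta(days=10),
        expiration_date_func=lambda month_begin: month_begin - pd.Timedelta(days=5),
        start_date_func=lambda month_begin: month_begin - pd.Timedelta(days=365),
        month_codes={'H': 3, 'M': 6, 'U': 9, 'Z': 12},
    )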
def make_commodity_future_info(first_sid,
root_symbols,
years,
month_codes=None,
multiplier=500):
"""
Make futures testing data that simulates the notice/expiration date
behavior of physical commodities like oil.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CMES_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Expiration dates are on the 20th of the month prior to the month code.
Notice dates are are on the 20th two months prior to the month code.
Start dates are one year before the contract month.
See Also
--------
make_future_info
"""
nineteen_days = pd.Timedelta(days=19)
one_year = | pd.Timedelta(days=365) | pandas.Timedelta |
# -*- coding: utf-8 -*-
# aalen additive
if __name__ == "__main__":
import pandas as pd
import numpy as np
import time
from lifelines.fitters.aalen_additive_fitter import AalenAdditiveFitter
from lifelines.datasets import load_rossi
df = load_rossi()
df = | pd.concat([df] * 1) | pandas.concat |
from collections import (
Counter,
)  # Count the occurrences of each word within the list
from sklearn.linear_model import LinearRegression
import pandas as pd
import numpy as np
import math
import seaborn as sns
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.graphics.regressionplots import *
# from year to decade
def etichetta(row):
anno = row["anno"].strip()
if anno in (
"2010",
"2011",
"2012",
"2013",
"2014",
"2015",
"2016",
"2017",
"2018",
"2019",
"2020",
"2021",
):
return "2010-2021"
elif anno in (
"2000",
"2001",
"2002",
"2003",
"2004",
"2005",
"2006",
"2007",
"2008",
"2009",
):
return "2000-2009"
elif anno in (
"1990",
"1991",
"1992",
"1993",
"1994",
"1995",
"1996",
"1997",
"1998",
"1999",
):
return "1990-1999"
elif anno in (
"1980",
"1981",
"1982",
"1983",
"1984",
"1985",
"1986",
"1987",
"1988",
"1989",
):
return "1980-1989"
elif anno in (
"1970",
"1971",
"1972",
"1973",
"1974",
"1975",
"1976",
"1977",
"1978",
"1979",
):
return "1970-1979"
elif anno in (
"1960",
"1961",
"1962",
"1963",
"1964",
"1965",
"1966",
"1967",
"1968",
"1969",
):
return "1960-1969"
elif anno in (
"1950",
"1951",
"1952",
"1953",
"1954",
"1955",
"1956",
"1957",
"1958",
"1959",
):
return "1950-1959"
elif anno in (
"1940",
"1941",
"1942",
"1943",
"1944",
"1945",
"1946",
"1947",
"1948",
"1949",
):
return "1940-1949"
elif anno in (
"1930",
"1931",
"1932",
"1933",
"1934",
"1935",
"1936",
"1937",
"1938",
"1939",
):
return "1930-1939"
elif anno in (
"1920",
"1921",
"1922",
"1923",
"1924",
"1925",
"1926",
"1927",
"1928",
"1929",
):
return "1920-1929"
elif anno in (
"1900",
"1901",
"1902",
"1903",
"1904",
"1905",
"1906",
"1907",
"1908",
"1909",
"1910",
"1911",
"1912",
"1913",
"1914",
"1915",
"1916",
"1917",
"1918",
"1919",
):
return "1900-1919"
elif anno in ("1847", "1865", "1880", "1883", "1886"):
return "1840-1899"
else:
return "other"
# from sub-category to books' category
def rename(row):
if row["physical_format"] == "Brossura":
return "Brossura"
if row["physical_format"] == "Rilegato":
return "Copertina Rigida"
if row["physical_format"] == "Libro":
return "Tascabile"
if row["physical_format"] == "hardcover":
return "Copertina Rigida"
if row["physical_format"] == "Illustrato":
return "Copertina Rigida"
if row["physical_format"] == "Cartonato":
return "Copertina Rigida"
if row["physical_format"] == "paperback":
return "Tascabile"
if row["physical_format"] == "Paperback / softback":
return "Tascabile"
if row["physical_format"] == "[electronic resource]":
return "Ebook"
if row["physical_format"] == "Libro + altro":
return "Tascabile"
if row["physical_format"] == "Hardback":
return "Copertina Rigida"
if row["physical_format"] == "unknown binding":
return "Altro"
if row["physical_format"] == "Libro + CD-Rom":
return "Tascabile"
if row["physical_format"] == "board book":
return "Copertina Rigida"
if row["physical_format"] == "pamphlet":
return "Tascabile"
if row["physical_format"] == "Paperback":
return "Tascabile"
if row["physical_format"] == "calendar":
return "Spiralato"
if row["physical_format"] == "Tascabile":
return "Tascabile"
if row["physical_format"] == "map":
return "Tascabile"
if row["physical_format"] == "spiral-bound":
return "Spiralato"
if row["physical_format"] == "mass market paperback":
return "Tascabile"
if row["physical_format"] == "library binding":
return "Copertina Rigida"
if row["physical_format"] == "pop-up":
return "pop-up"
if row["physical_format"] == "turtleback":
return "Copertina Rigida"
if row["physical_format"] == "cards":
return "Tascabile"
if row["physical_format"] == "paperback":
return "Tascabile"
return "Other"
# from key words to books' subjects
def assignment_cat(row):
subject_old = row["subjects"].lower().strip().split(" ")
lista_viaggi = [
"viaggi",
"travel",
"turismo",
"holiday",
"places",
"place",
"guide",
"guidebooks",
"cartine",
"guides",
"foreign",
"museum",
"turistiche",
"world",
]
lista_arte = [
"art",
"arte",
"arts",
"buildings",
"pittura",
"photography",
"exhibitions",
"landscape",
"ceramics",
"music",
"urban",
"catalogs",
"museo",
"scultura",
"moda",
"symphony",
"design",
"fashion",
"architettura",
"beni",
"culturali",
"individual",
"architects",
"photographs",
"photographers",
"fotografia",
"cinema",
"musica",
"artists",
"viviani",
]
lista_sport = ["sport", "sports"]
lista_storia_filosofia = ["storia", "history", "filosofia"]
lista_biografie = ["biografie", "biographies", "biography"]
lista_istruzione = [
"english",
"grammatica",
"dizionari",
"vocabulary",
"translating",
"manual",
"manuals",
"lingue",
"languages",
"università",
"study",
"scuola",
"psycholinguistics",
]
lista_attualità_politica_economia = [
"società",
"politics",
"rights",
"philosophy",
"immigration",
"emigration",
"business",
"economia",
"finanza",
"management",
"marketing",
"politica",
"diritto",
"lavoro",
"econometrics",
]
lista_bamibini_ragazzi = [
"bambini",
"ragazzi",
"children",
"childrens",
"fumetti",
"babypreschool",
]
lista_narrativa = [
"fantasy",
"family",
"fiction",
"romance",
"mistery",
"crime",
"horror",
"gothic",
"readers",
"narrativa",
"gialli",
"noir",
"avventura",
"passione",
"sentimenti",
]
lista_letteratura = [
"letteratura",
"criticism",
"literature",
"drama",
"letterature",
"poetry",
"romanzi",
"tolstoy",
"bronte",
"austen",
"defoe",
"dickens",
]
lista_scienza = [
"scienza",
"ambiente",
"animali",
"geology",
"tecnologia",
"technology",
"science",
"physics",
"nature",
"informatica",
"web",
"machine",
"learning",
"computer",
"combustion",
"engine",
]
lista_religione = [
"religione",
"spiritualità",
"religion",
"gnosticism",
"mind",
"spirit",
"christian",
"bible",
"church",
]
lista_gastronomia_cucina = [
"gastronomia",
"cucina",
"cook",
"wine",
"salute",
"benessere",
"cookery",
]
lista_hobby = ["hobby", "tempo"]
lista_categorie = [
lista_viaggi,
lista_arte,
lista_sport,
lista_storia_filosofia,
lista_biografie,
lista_istruzione,
lista_attualità_politica_economia,
lista_bamibini_ragazzi,
lista_narrativa,
lista_letteratura,
lista_scienza,
lista_religione,
lista_gastronomia_cucina,
lista_hobby,
]
nome_categorie = [
"viaggi",
"arte",
"sport",
"storia e filosofia",
"biografie",
"istruzione",
"attualità,politica ed economia",
"bambini e ragazzi",
"narrativa",
"letteratura",
"scienza e tecnologia",
"religione",
"gastronomia e cucina",
"hobby e tempo libero",
]
dizionario = zip(nome_categorie, lista_categorie)
max_intersection = 0
categoria_risultante = ""
for nome, lista_parole in dizionario:
intersection = len(list(set(lista_parole) & set(subject_old)))
if intersection > max_intersection:
max_intersection = intersection
categoria_risultante = nome
return categoria_risultante
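# Illustrative sketch (added for clarity; not part of the original script). The subject string
# below is invented; assignment_cat() picks the category whose keyword list overlaps it the most.
def _example_assignment_cat():
    """Hypothetical example: travel-related keywords map to the 'viaggi' category."""
    return assignment_cat(pd.Series({"subjects": "Travel guides Europe"}))  # -> "viaggi"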
def analysis_data(data_set_path, corrected_year_path, corrected_category_path):
# enriched dataset
data = pd.read_csv(data_set_path)
# dataset with fixed year
con_mille = pd.read_excel(corrected_year_path)
# dataset with fixed subjects
categorie = pd.read_excel(corrected_category_path)
# not useful columns
data2 = data.drop(
labels=[
"level_0",
"index",
"isbn_13",
"language",
"description",
"mondadori_url",
"hoepli_url",
"languages",
],
axis=1,
)
# drop all rows with at least a null
data2 = data2.dropna()
# from string to categorical column
data2["physical_format"] = pd.Categorical(data2["physical_format"])
# drop unuseful books
data2 = data2[
(data2.physical_format != "Game")
& (data2.physical_format != "audio cassette")
& (data2.physical_format != "Audio CD")
& (data2.physical_format != "Long Playing Mix")
& (data2.physical_format != "audio cd")
& (data2.physical_format != "cd-rom")
& (data2.physical_format != "CD-ROM")
& (data2.physical_format != "unknown binding")
]
# associate a subject to each book based on key words
data2["physical_format"] = data2.apply(lambda row: rename(row), axis=1)
# assign subject to each book
data2["Categoria"] = data2.apply(lambda row: assignment_cat(row), axis=1)
b = data2[data2["Categoria"] == ""]
c = data2[data2["Categoria"] != ""]
    # concatenate the datasets whose subjects were assigned manually and automatically
finale = pd.concat([c, categorie])
# split date
a = finale["publish_date"].str.rsplit("/", n=1, expand=True)
    # Take the columns from the split above and, looping over them, assign to each None value
    # in the second column the corresponding year from the first column.
d = a[0].tolist()
e = a[1].tolist()
conta = 0
for item in e:
if item is None:
e[conta] = d[conta]
conta += 1
else:
conta += 1
    # Add the year column using the list built above.
finale["anno"] = e
# manually cleaned
senza_mille = finale[finale["anno"] != "1000"]
senza_mille.drop(["publish_date", "subjects"], axis=1, inplace=True)
mille = finale[finale["anno"] == "1000"]
finale = pd.concat([senza_mille, con_mille])
finale.drop(["publish_date", "subjects"], axis=1, inplace=True)
finale["anno"] = finale["anno"].astype("str")
# year to decade
finale["anno"] = finale.apply(lambda row: etichetta(row), axis=1)
# manually clean some incorrect price
finale["price"].iloc[479] = 1000.00
finale["price"].iloc[974] = 1000.00
finale["price"].iloc[1467] = 5000.00
# violin plot: number_of_pages~physical_format
sns.set(style="whitegrid")
ax = sns.violinplot(
data=finale,
x="physical_format",
y="number_of_pages",
palette="Set2",
split=True,
)
ax.set_xlabel("")
sns.set(rc={"figure.figsize": (15, 15)})
# violin plot: price~physical_format
finale["price"] = | pd.to_numeric(finale["price"], downcast="float") | pandas.to_numeric |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder
import random, sys, os
import tensorflow as tf
import pickle
np.random.seed(13)
random.seed(22)
tf.set_random_seed(13)
classification = False
DUMMY = 22
def RMSE(act, pred):
'''
accept two numpy arrays
'''
return np.sqrt(np.mean(np.square(act - pred)))
from scipy.stats import pearsonr
def Pearson(act, pred):
return pearsonr(act, pred)[0]
from scipy.stats import spearmanr
def Spearman(act, pred):
'''
Note: there is no need to use spearman correlation for now
'''
return spearmanr(act, pred)[0]
def Delta_t95(act, pred):
num95 = int(np.ceil(len(act) * 0.95))
return 2 * sorted(abs(act - pred))[num95 - 1]
def Delta_tr95(act, pred):
return Delta_t95(act, pred) / (max(act) - min(act))
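# Illustrative sketch (added for clarity; not part of the original module). The arrays below are
# invented; all metrics expect two aligned numpy arrays of actual vs. predicted values.
def _example_metrics():
    """Hypothetical example comparing actual and predicted values with the metrics above."""
    act = np.array([1.0, 2.0, 3.0, 4.0])
    pred = np.array([1.1, 1.9, 3.2, 3.8])
    return {
        'rmse': RMSE(act, pred),
        'pearson': Pearson(act, pred),
        'delta_t95': Delta_t95(act, pred),
        'delta_tr95': Delta_tr95(act, pred),
    }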
def one_hot_dataset(dat, label_encoder, onehot_encoder, timesteps, num_input, middle = True):
oh_dat = np.zeros([len(dat), timesteps, num_input])
for c, el in enumerate(dat):
ie = label_encoder.transform(el)
#print(ie)
ie = ie.reshape(len(ie), 1)
oe = np.array(onehot_encoder.transform(ie))
#oh_dat[c, 0:oe.shape[0], :] = oe
if middle:
oh_dat[c, ((60-oe.shape[0])//2): ((60-oe.shape[0])//2)+oe.shape[0], :] = oe
else:
oh_dat[c, 0:oe.shape[0], :] = oe
return oh_dat
def int_dataset(dat, timesteps, num_input, middle = True):
oh_dat = (np.ones([len(dat), timesteps, 1], dtype=np.int32)*DUMMY).astype(np.int32)
cnt = 0
for c, row in dat.iterrows():
ie = np.array(row['encseq'])
oe = ie.reshape(len(ie), 1)
if middle:
oh_dat[cnt, ((60-oe.shape[0])//2): ((60-oe.shape[0])//2)+oe.shape[0], :] = oe
else:
oh_dat[cnt, 0:oe.shape[0], :] = oe
cnt += 1
return oh_dat
def count_dataset(dat, timesteps, num_input, middle=False):
ds = np.zeros([len(dat), num_input], dtype=np.int32)
cnt = 0
for c, row in dat.iterrows():
seq = row['encseq']
for v in seq:
ds[cnt, v] += 1
ds[cnt, -1] = np.sum(ds[cnt])
cnt += 1
if middle:
raise NotImplementedError
return ds
def scale(lab, min_lab, max_lab):
return (lab - min_lab) / (max_lab - min_lab)
def unscale(lab, min_lab, max_lab):
return (max_lab - min_lab) * lab + min_lab
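# Illustrative sketch (added for clarity; not part of the original module): scale() maps labels
# into [0, 1] given the training min/max and unscale() inverts it; the numbers are invented.
def _example_scale_roundtrip():
    """Hypothetical example of the min-max scaling helpers above."""
    labels = np.array([10.0, 25.0, 40.0])
    scaled = scale(labels, 10.0, 40.0)      # -> [0.0, 0.5, 1.0]
    restored = unscale(scaled, 10.0, 40.0)  # -> [10.0, 25.0, 40.0]
    return scaled, restored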
#convert to int encoded dataset, tf one hot for training
def get_data_set(model_params, return_array=False, middle=False):
do_one_hot = False
reverse = False
intrinsic_test = model_params['train_file'] == model_params['test_file']
sname = 'Modified_sequence'
data=pd.read_pickle(model_params['train_file'])
data = data.sample(frac=1).reset_index(drop=True)
if not intrinsic_test:
data_test = pd.read_pickle(model_params['test_file'])
data = data.sample(frac=1).reset_index(drop=True)
test_from = len(data)
data = | pd.concat([data, data_test], ignore_index=True, sort=False) | pandas.concat |
# Ref: https://towardsdatascience.com/data-apps-with-pythons-streamlit-b14aaca7d083
#/app.py
import streamlit as st
import json
import requests
# import sys
# import os
import pandas as pd
import numpy as np
import re
from datetime import datetime as dt
from pandas_profiling import ProfileReport
from streamlit_pandas_profiling import st_profile_report
from matplotlib import pyplot as plt
import seaborn as sns
# Initial setup
st.set_page_config(layout="wide")
with open('./env_variable.json','r') as j:
json_data = json.load(j)
#SLACK_BEARER_TOKEN = os.environ.get('SLACK_BEARER_TOKEN') ## Configure in the Streamlit Share settings
SLACK_BEARER_TOKEN = json_data['SLACK_BEARER_TOKEN']
DTC_GROUPS_URL = ('https://raw.githubusercontent.com/anhdanggit/atom-assignments/main/data/datacracy_groups.csv')
#st.write(json_data['SLACK_BEARER_TOKEN'])
@st.cache
def load_users_df():
# Slack API User Data
endpoint = "https://slack.com/api/users.list"
headers = {"Authorization": "Bearer {}".format(json_data['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, headers=headers).json()
user_dat = response_json['members']
# Convert to CSV
user_dict = {'user_id':[],'name':[],'display_name':[],'real_name':[],'title':[],'is_bot':[]}
for i in range(len(user_dat)):
user_dict['user_id'].append(user_dat[i]['id'])
user_dict['name'].append(user_dat[i]['name'])
user_dict['display_name'].append(user_dat[i]['profile']['display_name'])
user_dict['real_name'].append(user_dat[i]['profile']['real_name_normalized'])
user_dict['title'].append(user_dat[i]['profile']['title'])
user_dict['is_bot'].append(int(user_dat[i]['is_bot']))
user_df = pd.DataFrame(user_dict)
# Read dtc_group hosted in github
dtc_groups = pd.read_csv(DTC_GROUPS_URL)
user_df = user_df.merge(dtc_groups, how='left', on='name')
return user_df
@st.cache
def load_channel_df():
endpoint2 = "https://slack.com/api/conversations.list"
data = {'types': 'public_channel,private_channel'} # -> CHECK: API Docs https://api.slack.com/methods/conversations.list/test
headers = {"Authorization": "Bearer {}".format(SLACK_BEARER_TOKEN)}
response_json = requests.post(endpoint2, headers=headers, data=data).json()
channel_dat = response_json['channels']
channel_dict = {'channel_id':[], 'channel_name':[], 'is_channel':[],'creator':[],'created_at':[],'topics':[],'purpose':[],'num_members':[]}
for i in range(len(channel_dat)):
channel_dict['channel_id'].append(channel_dat[i]['id'])
channel_dict['channel_name'].append(channel_dat[i]['name'])
channel_dict['is_channel'].append(channel_dat[i]['is_channel'])
channel_dict['creator'].append(channel_dat[i]['creator'])
channel_dict['created_at'].append(dt.fromtimestamp(float(channel_dat[i]['created'])))
channel_dict['topics'].append(channel_dat[i]['topic']['value'])
channel_dict['purpose'].append(channel_dat[i]['purpose']['value'])
channel_dict['num_members'].append(channel_dat[i]['num_members'])
channel_df = pd.DataFrame(channel_dict)
return channel_df
@st.cache(allow_output_mutation=True)
def load_msg_dict(user_df,channel_df):
endpoint3 = "https://slack.com/api/conversations.history"
headers = {"Authorization": "Bearer {}".format(SLACK_BEARER_TOKEN)}
msg_dict = {'channel_id':[],'msg_id':[], 'msg_ts':[], 'user_id':[], 'latest_reply':[],'reply_user_count':[],'reply_users':[],'github_link':[],'text':[]}
for channel_id, channel_name in zip(channel_df['channel_id'], channel_df['channel_name']):
print('Channel ID: {} - Channel Name: {}'.format(channel_id, channel_name))
try:
data = {"channel": channel_id}
response_json = requests.post(endpoint3, data=data, headers=headers).json()
msg_ls = response_json['messages']
for i in range(len(msg_ls)):
if 'client_msg_id' in msg_ls[i].keys():
msg_dict['channel_id'].append(channel_id)
msg_dict['msg_id'].append(msg_ls[i]['client_msg_id'])
msg_dict['msg_ts'].append(dt.fromtimestamp(float(msg_ls[i]['ts'])))
msg_dict['latest_reply'].append(dt.fromtimestamp(float(msg_ls[i]['latest_reply'] if 'latest_reply' in msg_ls[i].keys() else 0))) ## -> No reply: 1970-01-01
msg_dict['user_id'].append(msg_ls[i]['user'])
msg_dict['reply_user_count'].append(msg_ls[i]['reply_users_count'] if 'reply_users_count' in msg_ls[i].keys() else 0)
msg_dict['reply_users'].append(msg_ls[i]['reply_users'] if 'reply_users' in msg_ls[i].keys() else 0)
msg_dict['text'].append(msg_ls[i]['text'] if 'text' in msg_ls[i].keys() else 0)
## -> Censor message contains tokens
text = msg_ls[i]['text']
github_link = re.findall('(?:https?://)?(?:www[.])?github[.]com/[\w-]+/?', text)
msg_dict['github_link'].append(github_link[0] if len(github_link) > 0 else None)
except:
print('====> '+ str(response_json))
msg_df = pd.DataFrame(msg_dict)
return msg_df
def process_msg_data(msg_df, user_df, channel_df):
## Extract 2 reply_users
msg_df['reply_user1'] = msg_df['reply_users'].apply(lambda x: x[0] if x != 0 else '')
msg_df['reply_user2'] = msg_df['reply_users'].apply(lambda x: x[1] if x != 0 and len(x) > 1 else '')
## Merge to have a nice name displayed
msg_df = msg_df.merge(user_df[['user_id','name','DataCracy_role']].rename(columns={'name':'submit_name'}), \
how='left',on='user_id')
msg_df = msg_df.merge(user_df[['user_id','name']].rename(columns={'name':'reply1_name','user_id':'reply1_id'}), \
how='left', left_on='reply_user1', right_on='reply1_id')
msg_df = msg_df.merge(user_df[['user_id','name']].rename(columns={'name':'reply2_name','user_id':'reply2_id'}), \
how='left', left_on='reply_user2', right_on='reply2_id')
## Merge for nice channel name
msg_df = msg_df.merge(channel_df[['channel_id','channel_name','created_at']], how='left',on='channel_id')
## Format datetime cols
msg_df['created_at'] = msg_df['created_at'].dt.strftime('%Y-%m-%d')
msg_df['msg_date'] = msg_df['msg_ts'].dt.strftime('%Y-%m-%d')
msg_df['msg_time'] = msg_df['msg_ts'].dt.strftime('%H:%M')
msg_df['msg_weekday'] = msg_df['msg_ts'].dt.strftime('%w')
msg_df['msg_hour'] = msg_df['msg_ts'].dt.strftime('%H')
msg_df['wordcount'] = msg_df.text.apply(lambda s: len(s.split()))
return msg_df
def get_submission(p_msg_df, user_id):
'''Return 'channel_name', 'created_at','msg_date','msg_time','reply_user_count', 'reply1_name' of
submission
'''
## Submission
submit_df = p_msg_df[p_msg_df.channel_name.str.contains('assignment')]
submit_df = submit_df[submit_df.DataCracy_role.str.contains('Learner')]
submit_df = submit_df[submit_df.user_id == user_id]
latest_ts = submit_df.groupby(['channel_name', 'user_id']).msg_ts.idxmax() ## -> Latest ts
submit_df = submit_df.loc[latest_ts]
dis_cols1 = ['channel_name', 'created_at','msg_date','msg_weekday','msg_time','msg_hour','reply_user_count', 'reply1_name']
return(submit_df[dis_cols1])
def get_review(p_msg_df, user_id):
    '''Return 'channel_name', 'created_at', 'msg_date', 'msg_time', 'reply_user_count', 'submit_name' of
    reviews
'''
# Review
review_df = p_msg_df[p_msg_df.user_id != user_id] ##-> Remove the case self-reply
    review_df = review_df[review_df.channel_name.str.contains('assignment')]
review_df = review_df[review_df.DataCracy_role.str.contains('Learner')]
dis_cols2 = ['channel_name', 'created_at','msg_date','msg_time','reply_user_count','submit_name']
return(review_df [dis_cols2])
def get_discussion(p_msg_df):
'''''Return channel_name','msg_date', 'msg_time','wordcount','reply_user_count','reply1_name' of
discussion
'''
## Discussion
discuss_df = p_msg_df[p_msg_df.channel_name.str.contains('discuss')]
discuss_df = discuss_df.sort_values(['msg_date','msg_time'])
dis_cols3 = ['channel_name','msg_date', 'msg_time','wordcount','reply_user_count','reply1_name']
return(discuss_df[dis_cols3])
def get_report(p_msg_df, user_id):
    '''Return 'user_id', 'submit_name', 'DataCracy_role', 'submit_cnt', 'review_cnt', 'reviewed_rate', 'word_count',
'submit_weekday', 'submit_hour' of given user_id
report_cols = ['user_id', 'submit_name', 'DataCracy_role', 'submit_cnt', 'review_cnt', 'reviewed_rate', 'word_count',
'submit_weekday', 'submit_hour']
'''
    ### Keep only the messages involving this user_id, either as the author or as one of the repliers (reply_user1 / reply_user2)
filter_msg_df = p_msg_df[(p_msg_df.user_id == user_id) | (p_msg_df.reply_user1 == user_id) | (p_msg_df.reply_user2 == user_id)]
submit_df = get_submission(filter_msg_df, user_id)
review_df = get_review(filter_msg_df, user_id)
discuss_df = get_discussion(filter_msg_df)
    ## Summary statistics
    ### Number of assignments submitted: submit_cnt
    ### Number of other learners' submissions reviewed: review_cnt
    ### % of own submissions that received a review: reviewed_rate
    ### Word count across discussion messages: word_count
    ### Weekday of the submission dates (averaged): submit_weekday
    ### Hour of day of the submissions (averaged): submit_hour
filter_report_df = filter_msg_df[filter_msg_df['user_id'] == user_id].head(1)[['user_id','submit_name', 'DataCracy_role']]
submit_cnt = len(submit_df)
review_cnt = len(review_df)
reviewed_rate = round(100 * len(submit_df[submit_df.reply_user_count > 0])/submit_cnt if submit_cnt > 0 else 0, 2)
word_count = round(sum(discuss_df['wordcount']),2)
submit_weekday =round(submit_df['msg_weekday'].astype('int32').mean(),2)
submit_hour = round(submit_df['msg_hour'].astype('int32').mean(),2)
filter_report_df['submit_cnt'] = submit_cnt
filter_report_df['review_cnt'] = review_cnt
filter_report_df['reviewed_rate'] = reviewed_rate
filter_report_df['word_count'] = word_count
filter_report_df['submit_weekday'] = submit_weekday
filter_report_df['submit_hour'] = submit_hour
return (filter_report_df)
def get_Atom_report(msg_df, user_df, channel_df):
# Table data
# user_df = load_users_df()
# channel_df = load_channel_df()
# msg_df = load_msg_dict(user_df,channel_df)
# processing data
p_msg_df = process_msg_data(msg_df, user_df, channel_df)
report_df = pd.DataFrame()
for user_id in p_msg_df[p_msg_df['DataCracy_role'].str.contains('Learner') & p_msg_df['channel_name'].str.contains('assignment')]['user_id'].unique():
filter_report_df = get_report(p_msg_df, user_id)
report_df = report_df.append(filter_report_df, ignore_index=True)
return (report_df)
# def get_df(file):
# # get extension and read file
# extension = file.name.split('.')[1]
# if extension.upper() == 'CSV':
# df = pd.read_csv(file)
# elif extension.upper() == 'XLSX':
# df = pd.read_excel(file, engine='openpyxl')
# elif extension.upper() == 'PICKLE':
# df = pd.read_pickle(file)
# return df
# Function to explore the data
def summary(df, nrows = 5):
# DATA
#st.write('Data:')
#st.write(df.head(nrows))
# SUMMARY
df_types = | pd.DataFrame(df.dtypes, columns=['Data Type']) | pandas.DataFrame |
# Some utilites functions for loading the data, adding features
import numpy as np
import pandas as pd
from functools import reduce
from sklearn.preprocessing import MinMaxScaler
def load_csv(path):
"""Load dataframe from a csv file
Args:
path (STR): File path
"""
# Load the file
df = pd.read_csv(path)
# Lowercase column names
df.rename(columns=lambda x: x.lower().strip(), inplace=True)
return df
def fill_missing_values(df):
"""Fill the missing data points
Args:
df: Input dataframe
Return: the modified dataframe
"""
# Get datetime col
df['ds'] = pd.to_datetime(df['update_time']) + df['hour_id'].astype('timedelta64[h]')
pdlist = []
for z in df.zone_code.unique():
zone = df[df['zone_code'] == z]
r = pd.date_range(zone.ds.min(), zone.ds.max(), freq='H')
ds_range = pd.DataFrame({'ds': r, 'zone_code': z})
zone_merged = ds_range.merge(zone, how='left', on=['ds', 'zone_code'])
zone_merged['hour_id'] = zone_merged['ds'].dt.hour
# Fill the null values
for col in ['bandwidth_total', 'max_user']:
for index, row in zone_merged[zone_merged[col].isnull()].iterrows():
shifted_index = index - (24*7)
flag = True
while flag:
fill_val = zone_merged.loc[shifted_index, col]
if pd.isnull(fill_val):
shifted_index -= (24*7)
continue
zone_merged.loc[index, col] = fill_val
flag = False
pdlist.append(zone_merged)
out = pd.concat(pdlist)
out.drop(['update_time'], axis=1, inplace=True)
assert not out.isnull().values.any(), 'Error in asserting. There are still nans.'
return out
def add_time_features(df, test=False):
"""Add time features for the data
Args:
df (DataFrame): Input dataframe
Return: the modified df
"""
if test:
df['ds'] = pd.to_datetime(df['update_time']) + df['hour_id'].astype('timedelta64[h]')
else:
df['update_time'] = df['ds'].dt.date
df['dow'] = df['ds'].dt.dayofweek
df['month'] = df['ds'].dt.month
df['doy'] = df['ds'].dt.dayofyear
df['year'] = df['ds'].dt.year
df['day'] = df['ds'].dt.day
df['week'] = df['ds'].dt.week
# df['weekend'] = df['dow'] // 5 == 1
# Normalise day of week col
week_period = 7 / (2 * np.pi)
df['dow_norm'] = df.dow.values / week_period
return df
def add_time_periods(df):
"""Add time periods of a day
Args:
df (DataFrame): Input dataframe
Return: the modified df
"""
df['hour_id'] = pd.to_numeric(df['hour_id'])
conditions = [
(df['hour_id'] >= 21) | (df['hour_id'] < 1),
(df['hour_id'] >= 1) & (df['hour_id'] < 6),
(df['hour_id'] >= 6) & (df['hour_id'] < 11),
(df['hour_id'] >= 11) & (df['hour_id'] < 14),
(df['hour_id'] >= 14) & (df['hour_id'] < 17),
(df['hour_id'] >= 17) & (df['hour_id'] < 19),
(df['hour_id'] >= 19) & (df['hour_id'] < 21),
]
choices = ['21h-1h', '1h-6h', '6h-11h', '11h-14h', '14h-17h', '17h-19h', '19h-21h']
df['time_period'] = 'default'
for cond, ch in zip(conditions, choices):
df.loc[cond, 'time_period'] = ch
return df
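# Illustrative sketch (added for clarity; not part of the original module). The hours below are
# invented; add_time_periods() buckets hour_id into the day-part labels defined above.
def _example_add_time_periods():
    """Hypothetical example of the day-part bucketing."""
    demo = pd.DataFrame({'hour_id': [0, 8, 12, 15, 18, 20, 23]})
    # Expected: ['21h-1h', '6h-11h', '11h-14h', '14h-17h', '17h-19h', '19h-21h', '21h-1h']
    return add_time_periods(demo)['time_period'].tolist()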
def add_special_days_features(df):
"""Add special events and holidays features
Args:
df (DataFrame): Input dataframe
Return: the modified df
"""
# Days when there were sudden decrease/increase in bandwidth/max users
range1 = pd.date_range('2018-02-10', '2018-02-27')
range2 = pd.date_range('2019-01-30', '2019-02-12')
abnormals = range1.union(range2)
# Init 2 new columns
df['abnormal_bw'], df['abnormal_u'] = 0,0
# Set the abnormal weights for each zone (negative if decrease, positive if increase)
# For total bandwidth
df.loc[df['zone_code'].isin(['ZONE01']) ,'abnormal_bw'] = df[df['zone_code'].isin(['ZONE01'])].update_time.apply(lambda date: -1 if pd.to_datetime(date) in abnormals else 0)
df.loc[df['zone_code'].isin(['ZONE02']) ,'abnormal_bw'] = df[df['zone_code'].isin(['ZONE02'])].update_time.apply(lambda date: 0.8 if pd.to_datetime(date) in abnormals else 0)
df.loc[df['zone_code'].isin(['ZONE03']) ,'abnormal_bw'] = df[df['zone_code'].isin(['ZONE03'])].update_time.apply(lambda date: 0.2 if pd.to_datetime(date) in abnormals else 0)
# For max users
df.loc[df['zone_code'].isin(['ZONE01']) ,'abnormal_u'] = df[df['zone_code'].isin(['ZONE01'])].update_time.apply(lambda date: -1 if pd.to_datetime(date) in abnormals else 0)
df.loc[df['zone_code'].isin(['ZONE02']) ,'abnormal_u'] = df[df['zone_code'].isin(['ZONE02'])].update_time.apply(lambda date: 0.8 if pd.to_datetime(date) in abnormals else 0)
df.loc[df['zone_code'].isin(['ZONE03']) ,'abnormal_u'] = df[df['zone_code'].isin(['ZONE03'])].update_time.apply(lambda date: 0.6 if pd.to_datetime(date) in abnormals else 0)
# Holidays
holidays = pd.to_datetime(['2018-01-01', '2017-12-23', '2017-12-24', '2017-12-25',
'2018-02-14', '2018-02-15', '2018-02-16', '2018-02-17', '2018-02-18', '2018-02-19', '2018-02-20',
'2018-03-27', '2018-04-30', '2018-05-01', '2018-09-02', '2018-09-03', '2018-12-31',
'2019-01-01', '2019-02-04', '2019-02-05', '2019-02-06', '2019-02-07', '2019-02-08',
'2019-04-15',
'2019-04-29', '2019-04-30', '2019-05-01', '2019-09-02',
])
df['holiday'] = df.update_time.apply(lambda date: 1 if pd.to_datetime(date) in holidays else 0)
return df
def zone_features(df, zfeatures, aufeatures):
"""Create zone features from the data
Args:
df (DataFrame): Input dataframe
zfeatures (list): List of zone median features
aufeatures (list): List of zone autocorr features
Return: 2 dataframes
"""
# Medians from the last 1,3,6,12 months
zones_1y = df[(df['ds'] >= '2018-03-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_1y.columns = ['zone_code','median_user_1y','median_bw_1y']
zones_1y['median_bw_per_user_1y'] = zones_1y['median_bw_1y'] / zones_1y['median_user_1y']
zones_1m = df[(df['ds'] >= '2019-02-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_1m.columns = ['zone_code','median_user_1m','median_bw_1m']
zones_1m['median_bw_per_user_1m'] = zones_1m['median_bw_1m'] / zones_1m['median_user_1m']
zones_3m = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_3m.columns = ['zone_code','median_user_3m','median_bw_3m']
zones_3m['median_bw_per_user_3m'] = zones_3m['median_bw_3m'] / zones_3m['median_user_3m']
zones_6m = df[(df['ds'] >= '2018-09-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_6m.columns = ['zone_code','median_user_6m','median_bw_6m']
zones_6m['median_bw_per_user_6m'] = zones_6m['median_bw_6m'] / zones_6m['median_user_6m']
# Autocorrelation features
zones_autocorr = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': {
'lag_user_1d' :lambda x: pd.Series.autocorr(x, 24),
'lag_user_3d' :lambda x: pd.Series.autocorr(x, 3*24),
'lag_user_1w' :lambda x: pd.Series.autocorr(x, 24*7),
},
'bandwidth_total': {
'lag_bw_1d' :lambda x: pd.Series.autocorr(x, 24),
import os
import jpeg4py as jpeg
import numpy as np
import pandas as pd
import torch
from pathlib import Path
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from utils import make_mask
class TrainDataset(Dataset):
def __init__(self, df, data_folder, phase, transforms, num_classes, return_fnames):
self.df = df
self.root = data_folder
self.phase = phase
self.transforms = transforms
self.fnames = self.df.index.tolist()
self.num_classes = num_classes
self.return_fnames = return_fnames
def __getitem__(self, idx):
image_id, mask = make_mask(idx, self.df)
image_path = expand_path(image_id)
img = jpeg.JPEG(str(image_path)).decode()
augmented = self.transforms(image=img, mask=mask)
img = augmented['image']
mask = augmented['mask'] # 1x256x1600x4
mask = mask[0].permute(2, 0, 1) # 1x4x256x1600
if self.num_classes == 5:
mask_0 = (mask.sum(dim=0, keepdim=True) == 0).float()
mask = torch.cat([mask_0, mask], dim=0)
if self.return_fnames:
return img, mask, image_id
else:
return img, mask
def __len__(self):
return len(self.fnames)
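# Hedged usage sketch: how TrainDataset is typically wrapped in a torch DataLoader. The dataframe,
# data folder and albumentations transform are assumed to be prepared by the caller; make_loader
# further below is the real entry point, this helper is illustrative only.
def example_train_loader(df, data_folder, transforms, batch_size=8):
    dataset = TrainDataset(df, data_folder, 'train', transforms,
                           num_classes=4, return_fnames=False)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True,
                      num_workers=2, pin_memory=True)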
class ClsTrainDataset(Dataset):
def __init__(self, df, data_folder, phase, transforms, num_classes=4, return_fnames=False):
self.df = df
self.root = data_folder
self.phase = phase
self.transforms = transforms
self.fnames = self.df.index.tolist()
self.num_classes = num_classes
self.return_fnames = return_fnames
def __getitem__(self, idx):
image_id = self.df.iloc[idx].name
if self.num_classes == 4:
label = self.df.iloc[idx, :4].notnull().values.astype('f')
else:
label = np.zeros(5)
label[1:5] = self.df.iloc[idx, :4].notnull()
label[0] = label[1:5].sum() <= 0
label = label.astype('f')
image_path = os.path.join(self.root, image_id)
img = jpeg.JPEG(image_path).decode()
augmented = self.transforms(image=img)
img = augmented['image']
if self.return_fnames:
return img, label, image_id
else:
return img, label
def __len__(self):
return len(self.fnames)
class TestDataset(Dataset):
def __init__(self, root, df, transforms):
self.root = root
df['ImageId'] = df['ImageId_ClassId'].apply(lambda x: x.split('_')[0])
self.fnames = df['ImageId'].unique().tolist()
self.num_samples = len(self.fnames)
self.transforms = transforms
def __getitem__(self, idx):
fname = self.fnames[idx]
image_path = os.path.join(self.root, fname)
img = jpeg.JPEG(image_path).decode()
images = self.transforms(image=img)["image"]
return fname, images
def __len__(self):
return self.num_samples
class FilteredTestDataset(Dataset):
def __init__(self, root, df, transform):
self.root = root
df = df[(df > 0.5).sum(axis=1) > 0]  # keep only images predicted to contain at least one defect
self.fnames = df.index.tolist()
self.num_samples = len(self.fnames)
self.transform = transform
def __getitem__(self, idx):
fname = self.fnames[idx]
image_path = os.path.join(self.root, fname)
img = jpeg.JPEG(image_path).decode()
images = self.transform(image=img)["image"]
return fname, images
def __len__(self):
return self.num_samples
def expand_path(p):
train_dir = Path('../Datasets/caliche/severstal-steel-defect-detection/train_images')
test_dir = Path('../Datasets/caliche/severstal-steel-defect-detection/test_images')
if (train_dir / p).exists():
return train_dir / p
elif (test_dir / p).exists():
return test_dir / p
def make_loader(
data_folder,
df_path,
phase,
batch_size=8,
num_workers=2,
idx_fold=None,
transforms=None,
num_classes=4,
pseudo_label_path=None,
task='seg', # choice of ['cls', 'seg'],
return_fnames=False,
debug=False,
):
if debug:
num_rows = 100
else:
num_rows = None
df = pd.read_csv(df_path, nrows=num_rows)
if phase == 'test':
image_dataset = TestDataset(data_folder, df, transforms)
is_shuffle = False
elif phase == 'filtered_test':
df = pd.read_csv(df_path, nrows=num_rows, index_col=0)
image_dataset = FilteredTestDataset(data_folder, df, transforms)
is_shuffle = False
else: # train or valid
if os.path.exists('folds.csv'):
folds = pd.read_csv('folds.csv', index_col='ImageId', nrows=num_rows)
else:
raise Exception('You need to run split_folds.py beforehand.')
if phase == "train":
folds = folds[folds['fold'] != idx_fold]
if pseudo_label_path is not None and os.path.exists(pseudo_label_path):
pseudo_df = pd.read_csv(pseudo_label_path)
pseudo_df['ImageId'], pseudo_df['ClassId'] = zip(*pseudo_df['ImageId_ClassId'].str.split('_'))
pseudo_df['ClassId'] = pseudo_df['ClassId'].astype(int)
pseudo_df['exists'] = pseudo_df['EncodedPixels'].notnull().astype(int)
pseudo_df['ClassId0'] = [row.ClassId if row.exists else 0 for row in pseudo_df.itertuples()]
pv_df = pseudo_df.pivot(index='ImageId', columns='ClassId', values='EncodedPixels')
folds = pd.concat([folds, pv_df], axis=0)
import pandas as pd
import numpy as np
import datetime
import sys
import time
import xgboost as xgb
from add_feture import *
FEATURE_EXTRACTION_SLOT = 10
LabelDay = datetime.datetime(2014,12,18,0,0,0)
Data = pd.read_csv("../../../../data/fresh_comp_offline/drop1112_sub_item.csv")
Data['daystime'] = Data['days'].map(lambda x: time.strptime(x, "%Y-%m-%d")).map(lambda x: datetime.datetime(*x[:6]))
def get_train(train_user,end_time):
# Take the records from the day before the label day as the samples to be labelled
data_train = train_user[(train_user['daystime'] == (end_time-datetime.timedelta(days=1)))]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
# Drop duplicate samples from the training set
data_train = data_train.drop_duplicates(['user_id', 'item_id'])
data_train_ui = data_train['user_id'] / data_train['item_id']
# print(len(data_train))
# Label the samples with the actual purchases made on the label day
data_label = train_user[train_user['daystime'] == end_time]
data_label_buy = data_label[data_label['behavior_type'] == 4]
data_label_buy_ui = data_label_buy['user_id'] / data_label_buy['item_id']
# Label the previous day's interaction records
data_train_labeled = data_train_ui.isin(data_label_buy_ui)
label_map = {True: 1, False: 0}  # avoid shadowing the built-in dict
data_train_labeled = data_train_labeled.map(label_map)
data_train['label'] = data_train_labeled
return data_train[['user_id', 'item_id','item_category', 'label']]
def get_label_testset(train_user,LabelDay):
# The test set is all interaction data from the previous day
data_test = train_user[(train_user['daystime'] == LabelDay)]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
data_test = data_test.drop_duplicates(['user_id', 'item_id'])
return data_test[['user_id', 'item_id','item_category']]
def item_category_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_category,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_category,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_category_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,countAverage,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,buyRate,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_category_feture.fillna(0,inplace=True)
return item_category_feture
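# Hedged illustration of the pd.crosstab pattern used above: one row per item_category, one
# column per behavior_type, cells holding interaction counts. Synthetic data, for demonstration
# only; not called by the feature pipeline.
def _demo_crosstab():
    demo = pd.DataFrame({'item_category': [1, 1, 2, 2, 2],
                         'behavior_type': [1, 4, 1, 1, 4]})
    # result has index [1, 2] and columns [1, 4]; e.g. category 2 was clicked (type 1) twice
    return pd.crosstab(demo.item_category, demo.behavior_type)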
def item_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_id,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
item_count_unq = data.groupby(by = ['item_id','behavior_type']).agg({"user_id":lambda x:x.nunique()});item_count_unq = item_count_unq.unstack()
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_id,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_id_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,countAverage,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,buyRate,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_unq,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_id_feture = pd.merge(item_id_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_id_feture = pd.merge(item_id_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_id_feture.fillna(0,inplace=True)
return item_id_feture
def user_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_count = pd.crosstab(data.user_id,data.behavior_type)
user_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
user_count_before5 = pd.crosstab(beforefiveday.user_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
user_count_before5 = pd.crosstab(beforefiveday.user_id,beforefiveday.behavior_type)
user_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
user_count_before_3 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
user_count_before_3 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
user_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
user_count_before_2 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
user_count_before_2 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayuser_count = pd.crosstab(beforeoneday.user_id,beforeoneday.behavior_type)
countAverage = user_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = user_count[1]/user_count[4]
buyRate['skim'] = user_count[2]/user_count[4]
buyRate['collect'] = user_count[3]/user_count[4]
buyRate.index = user_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = user_count_before5[1]/user_count_before5[4]
buyRate_2['skim'] = user_count_before5[2]/user_count_before5[4]
buyRate_2['collect'] = user_count_before5[3]/user_count_before5[4]
buyRate_2.index = user_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = user_count_before_3[1]/user_count_before_3[4]
buyRate_3['skim'] = user_count_before_3[2]/user_count_before_3[4]
buyRate_3['collect'] = user_count_before_3[3]/user_count_before_3[4]
buyRate_3.index = user_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
long_online = pd.pivot_table(beforeoneday,index=['user_id'],values=['hours'],aggfunc=[np.min,np.max,np.ptp])
user_id_feture = pd.merge(user_count,beforeonedayuser_count,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,countAverage,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,buyRate,how='left',right_index=True,left_index=True)
import sys, os
sys.path.insert(1, "../")
sys.path.append("../../../")
sys.path.append("../../../competitors/AIF360/")
import numpy as np
np.random.seed(0)
from aif360.datasets import SalaryDataset, BinaryLabelDataset, StructuredDataset
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.metrics import ClassificationMetric
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, MaxAbsScaler
from sklearn.metrics import accuracy_score
import tensorflow as tf
import load_salary as load_file
dist = load_file.dist
perm = int(sys.argv[1])
ordering = load_file.permutations(perm)
biased_test_points = np.load(f"{os.path.dirname(os.path.realpath(__file__))}/../../salary/salary_biased_points_dist{dist}.npy")
debiased_test = bool(int(sys.argv[2]))
dataset_orig = SalaryDataset(
protected_attribute_names=['sex'],
privileged_classes=[[1]],
normalized = False,
permute=perm
)
train_examples = 40
dataset_orig_train, dataset_orig_test = dataset_orig.split([train_examples], shuffle=False)
assert(len(dataset_orig_train.convert_to_dataframe()[0]) == train_examples)
if debiased_test:
test_points = np.array(ordering[train_examples:])
mask = np.in1d(test_points, biased_test_points) # True if the point is biased
mask_new = ~mask
x = mask_new.astype(int).nonzero()[0]
dataset_orig_test = dataset_orig_test.subset(x)
assert(len(dataset_orig_test.convert_to_dataframe()[0]) < 52 - train_examples)
else:
assert(len(dataset_orig_test.convert_to_dataframe()[0]) == 52 - train_examples)
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
min_max_scaler = MaxAbsScaler()
dataset_orig_train.features = min_max_scaler.fit_transform(dataset_orig_train.features)
dataset_orig_test.features = min_max_scaler.transform(dataset_orig_test.features)
sess = tf.Session()
debiased_model = AdversarialDebiasing(privileged_groups = privileged_groups,
unprivileged_groups = unprivileged_groups,
scope_name='debiased_classifier',
debias=True,
sess=sess, num_epochs=200)
debiased_model.fit(dataset_orig_train)
dataset_debiasing_train = debiased_model.predict(dataset_orig_train)
dataset_debiasing_test = debiased_model.predict(dataset_orig_test)
classified_metric_debiasing_test = ClassificationMetric(dataset_orig_test,
dataset_debiasing_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
classified_metric_debiasing_train = ClassificationMetric(dataset_orig_train,
dataset_debiasing_train,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
train_acc = classified_metric_debiasing_train.accuracy()
test_acc = classified_metric_debiasing_test.accuracy()
# import ipdb; ipdb.set_trace()
# if dataset_orig_test.convert_to_dataframe()[0]
diff = classified_metric_debiasing_test.statistical_parity_difference()
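# Hedged sketch of what statistical_parity_difference measures:
# P(y_hat = 1 | unprivileged) - P(y_hat = 1 | privileged). AIF360 computes it above;
# the helper below is illustrative only and is not used elsewhere in this script.
def statistical_parity_difference_manual(y_pred, sex):
    y_pred, sex = np.asarray(y_pred), np.asarray(sex)
    return y_pred[sex == 0].mean() - y_pred[sex == 1].mean()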
def find_discm_examples(class0_data, class1_data, print_file, scheme):
import pandas as pd
assert class0_data.shape[0] == class1_data.shape[0]
cols = ['sex','rank','year','degree','Experience']
df0 = pd.DataFrame(data=class0_data, columns=cols, dtype='float')
#!/usr/bin/python
# -*-coding: utf-8 -*-
# Author: <NAME>
# Email : <EMAIL>
def EffectSizeDataFramePlotter(EffectSizeDataFrame, **plot_kwargs):
"""
Custom function that creates an estimation plot from an EffectSizeDataFrame.
Keywords
--------
EffectSizeDataFrame: A `dabest` EffectSizeDataFrame object.
**plot_kwargs:
color_col=None
raw_marker_size=6, es_marker_size=9,
swarm_label=None, contrast_label=None,
swarm_ylim=None, contrast_ylim=None,
custom_palette=None, swarm_desat=0.5, halfviolin_desat=1,
halfviolin_alpha=0.8,
float_contrast=True,
show_pairs=True,
group_summaries=None,
group_summaries_offset=0.1,
fig_size=None,
dpi=100,
ax=None,
swarmplot_kwargs=None,
violinplot_kwargs=None,
slopegraph_kwargs=None,
reflines_kwargs=None,
group_summary_kwargs=None,
legend_kwargs=None,
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from .misc_tools import merge_two_dicts
from .plot_tools import halfviolin, get_swarm_spans, gapped_lines
from ._stats_tools.effsize import _compute_standardizers, _compute_hedges_correction_factor
import logging
# Have to disable logging of warning when get_legend_handles_labels()
# tries to get from slopegraph.
logging.disable(logging.WARNING)
# Save rcParams that I will alter, so I can reset back.
original_rcParams = {}
_changed_rcParams = ['axes.grid']
for parameter in _changed_rcParams:
original_rcParams[parameter] = plt.rcParams[parameter]
plt.rcParams['axes.grid'] = False
ytick_color = plt.rcParams["ytick.color"]
axes_facecolor = plt.rcParams['axes.facecolor']
dabest_obj = EffectSizeDataFrame.dabest_obj
plot_data = EffectSizeDataFrame._plot_data
xvar = EffectSizeDataFrame.xvar
yvar = EffectSizeDataFrame.yvar
is_paired = EffectSizeDataFrame.is_paired
all_plot_groups = dabest_obj._all_plot_groups
idx = dabest_obj.idx
# Disable Gardner-Altman plotting if any of the idxs comprise of more than
# two groups.
float_contrast = plot_kwargs["float_contrast"]
effect_size_type = EffectSizeDataFrame.effect_size
if len(idx) > 1 or len(idx[0]) > 2:
float_contrast = False
if effect_size_type in ['cliffs_delta']:
float_contrast = False
# Disable slopegraph plotting if any of the idxs comprise of more than
# two groups.
if not np.all([len(i) == 2 for i in idx]):
is_paired = False
# if paired is False, set show_pairs as False.
if is_paired is False:
show_pairs = False
else:
show_pairs = plot_kwargs["show_pairs"]
# Set default kwargs first, then merge with user-dictated ones.
default_swarmplot_kwargs = {'size': plot_kwargs["raw_marker_size"]}
if plot_kwargs["swarmplot_kwargs"] is None:
swarmplot_kwargs = default_swarmplot_kwargs
else:
swarmplot_kwargs = merge_two_dicts(default_swarmplot_kwargs,
plot_kwargs["swarmplot_kwargs"])
# Violinplot kwargs.
default_violinplot_kwargs = {'widths':0.5, 'vert':True,
'showextrema':False, 'showmedians':False}
if plot_kwargs["violinplot_kwargs"] is None:
violinplot_kwargs = default_violinplot_kwargs
else:
violinplot_kwargs = merge_two_dicts(default_violinplot_kwargs,
plot_kwargs["violinplot_kwargs"])
# slopegraph kwargs.
default_slopegraph_kwargs = {'lw':1, 'alpha':0.5}
if plot_kwargs["slopegraph_kwargs"] is None:
slopegraph_kwargs = default_slopegraph_kwargs
else:
slopegraph_kwargs = merge_two_dicts(default_slopegraph_kwargs,
plot_kwargs["slopegraph_kwargs"])
# Zero reference-line kwargs.
default_reflines_kwargs = {'linestyle':'solid', 'linewidth':0.75,
'zorder': 2,
'color': ytick_color}
if plot_kwargs["reflines_kwargs"] is None:
reflines_kwargs = default_reflines_kwargs
else:
reflines_kwargs = merge_two_dicts(default_reflines_kwargs,
plot_kwargs["reflines_kwargs"])
# Legend kwargs.
default_legend_kwargs = {'loc': 'upper left', 'frameon': False}
if plot_kwargs["legend_kwargs"] is None:
legend_kwargs = default_legend_kwargs
else:
legend_kwargs = merge_two_dicts(default_legend_kwargs,
plot_kwargs["legend_kwargs"])
gs_default = {'mean_sd', 'median_quartiles', None}
if plot_kwargs["group_summaries"] not in gs_default:
raise ValueError('group_summaries must be one of'
' these: {}.'.format(gs_default) )
default_group_summary_kwargs = {'zorder': 3, 'lw': 2,
'alpha': 1}
if plot_kwargs["group_summary_kwargs"] is None:
group_summary_kwargs = default_group_summary_kwargs
else:
group_summary_kwargs = merge_two_dicts(default_group_summary_kwargs,
plot_kwargs["group_summary_kwargs"])
# Create color palette that will be shared across subplots.
color_col = plot_kwargs["color_col"]
if color_col is None:
color_groups = pd.unique(plot_data[xvar])
bootstraps_color_by_group = True
else:
if color_col not in plot_data.columns:
raise KeyError("``{}`` is not a column in the data.".format(color_col))
color_groups = pd.unique(plot_data[color_col])
import ast
import time
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Any
from matplotlib import dates as mdates
from scipy import stats
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.transitioning import Transition
from ds_discovery.components.commons import Commons
from aistac.properties.abstract_properties import AbstractPropertyManager
from ds_discovery.components.discovery import DataDiscovery
from ds_discovery.intent.abstract_common_intent import AbstractCommonsIntentModel
__author__ = '<NAME>'
class AbstractBuilderIntentModel(AbstractCommonsIntentModel):
_INTENT_PARAMS = ['self', 'save_intent', 'column_name', 'intent_order',
'replace_intent', 'remove_duplicates', 'seed']
def __init__(self, property_manager: AbstractPropertyManager, default_save_intent: bool=None,
default_intent_level: [str, int, float]=None, default_intent_order: int=None,
default_replace_intent: bool=None):
"""initialisation of the Intent class.
:param property_manager: the property manager class that references the intent contract.
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param default_intent_order: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
"""
default_save_intent = default_save_intent if isinstance(default_save_intent, bool) else True
default_replace_intent = default_replace_intent if isinstance(default_replace_intent, bool) else True
default_intent_level = default_intent_level if isinstance(default_intent_level, (str, int, float)) else 'A'
default_intent_order = default_intent_order if isinstance(default_intent_order, int) else 0
intent_param_exclude = ['size']
intent_type_additions = [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, pd.Timestamp]
super().__init__(property_manager=property_manager, default_save_intent=default_save_intent,
intent_param_exclude=intent_param_exclude, default_intent_level=default_intent_level,
default_intent_order=default_intent_order, default_replace_intent=default_replace_intent,
intent_type_additions=intent_type_additions)
def run_intent_pipeline(self, canonical: Any=None, intent_levels: [str, int, list]=None, run_book: str=None,
seed: int=None, simulate: bool=None, **kwargs) -> pd.DataFrame:
"""Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract. The whole run can be seeded though any parameterised seeding in the intent
contracts will take precedence
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param intent_levels: (optional) a single or list of intent_level to run in order given
:param run_book: (optional) a preset runbook of intent_level to run in order
:param seed: (optional) a seed value that will be applied across the run: default to None
:param simulate: (optional) returns a report of the order of run and return the indexed column order of run
:return: a pandas dataframe
"""
simulate = simulate if isinstance(simulate, bool) else False
col_sim = {"column": [], "order": [], "method": []}
# legacy
if 'size' in kwargs.keys():
canonical = kwargs.pop('size')
canonical = self._get_canonical(canonical)
size = canonical.shape[0] if canonical.shape[0] > 0 else 1000
# test if there is any intent to run
if self._pm.has_intent():
# get the list of levels to run
if isinstance(intent_levels, (str, list)):
column_names = Commons.list_formatter(intent_levels)
elif isinstance(run_book, str) and self._pm.has_run_book(book_name=run_book):
column_names = self._pm.get_run_book(book_name=run_book)
else:
# put all the intent in order of model, get, correlate, associate
_model = []
_get = []
_correlate = []
_frame_start = []
_frame_end = []
for column in self._pm.get_intent().keys():
for order in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column), {}):
for method in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column, order), {}).keys():
if str(method).startswith('get_'):
if column in _correlate + _frame_start + _frame_end:
continue
_get.append(column)
elif str(method).startswith('model_'):
_model.append(column)
elif str(method).startswith('correlate_'):
if column in _get:
_get.remove(column)
_correlate.append(column)
elif str(method).startswith('frame_'):
if column in _get:
_get.remove(column)
if str(method).startswith('frame_starter'):
_frame_start.append(column)
else:
_frame_end.append(column)
column_names = Commons.list_unique(_frame_start + _get + _model + _correlate + _frame_end)
for column in column_names:
level_key = self._pm.join(self._pm.KEY.intent_key, column)
for order in sorted(self._pm.get(level_key, {})):
for method, params in self._pm.get(self._pm.join(level_key, order), {}).items():
try:
if method in self.__dir__():
if simulate:
col_sim['column'].append(column)
col_sim['order'].append(order)
col_sim['method'].append(method)
continue
result = []
params.update(params.pop('kwargs', {}))
if isinstance(seed, int):
params.update({'seed': seed})
_ = params.pop('intent_creator', 'Unknown')
if str(method).startswith('get_'):
result = eval(f"self.{method}(size=size, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('correlate_'):
result = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('model_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_starter'):
canonical = self._get_canonical(params.pop('canonical', canonical), deep_copy=False)
size = canonical.shape[0]
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
if 0 < size != len(result):
raise IndexError(f"The index size of '{column}' is '{len(result)}', "
f"should be {size}")
canonical[column] = result
except ValueError as ve:
raise ValueError(f"intent '{column}', order '{order}', method '{method}' failed with: {ve}")
except TypeError as te:
raise TypeError(f"intent '{column}', order '{order}', method '{method}' failed with: {te}")
if simulate:
return pd.DataFrame.from_dict(col_sim)
return canonical
def _get_number(self, from_value: [int, float]=None, to_value: [int, float]=None, relative_freq: list=None,
precision: int=None, ordered: str=None, at_most: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number in the range from_value to to_value. if only to_value given from_value is zero
:param from_value: (signed) integer to start from
:param to_value: (optional) (signed) integer the number sequence goes up to but does not include
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param ordered: order the data ascending 'asc' or descending 'des'
:param at_most: the most times a selection should be chosen
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
"""
if not isinstance(from_value, (int, float)) and not isinstance(to_value, (int, float)):
raise ValueError(f"either a 'range_value' or a 'range_value' and 'to_value' must be provided")
if not isinstance(from_value, (float, int)):
from_value = 0
if not isinstance(to_value, (float, int)):
(from_value, to_value) = (0, from_value)
if to_value <= from_value:
raise ValueError("The number range must be a positive different, found to_value <= from_value")
at_most = 0 if not isinstance(at_most, int) else at_most
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if not isinstance(precision, int) else precision
if precision == 0:
from_value = int(round(from_value, 0))
to_value = int(round(to_value, 0))
is_int = True if (isinstance(to_value, int) and isinstance(from_value, int)) else False
if is_int:
precision = 0
# build the distribution sizes
if isinstance(relative_freq, list) and len(relative_freq) > 1:
freq_dist_size = self._freq_dist_size(relative_freq=relative_freq, size=size, seed=_seed)
else:
freq_dist_size = [size]
# generate the numbers
rtn_list = []
generator = np.random.default_rng(seed=_seed)
dtype = int if is_int else float
bins = np.linspace(from_value, to_value, len(freq_dist_size) + 1, dtype=dtype)
for idx in np.arange(1, len(bins)):
low = bins[idx - 1]
high = bins[idx]
if low >= high:
continue
elif at_most > 0:
sample = []
for _ in np.arange(at_most, dtype=dtype):
count_size = freq_dist_size[idx - 1] * generator.integers(2, 4, size=1)[0]
sample += list(set(np.linspace(bins[idx - 1], bins[idx], num=count_size, dtype=dtype,
endpoint=False)))
if len(sample) < freq_dist_size[idx - 1]:
raise ValueError(f"The value range has insufficient samples to choose from when using at_most."
f"Try increasing the range of values to sample.")
rtn_list += list(generator.choice(sample, size=freq_dist_size[idx - 1], replace=False))
else:
if dtype == int:
rtn_list += generator.integers(low=low, high=high, size=freq_dist_size[idx - 1]).tolist()
else:
choice = generator.random(size=freq_dist_size[idx - 1], dtype=float)
choice = np.round(choice * (high-low)+low, precision).tolist()
# make sure the precision
choice = [high - 10**(-precision) if x >= high else x for x in choice]
rtn_list += choice
# order or shuffle the return list
if isinstance(ordered, str) and ordered.lower() in ['asc', 'des']:
rtn_list.sort(reverse=True if ordered.lower() == 'asc' else False)
else:
generator.shuffle(rtn_list)
return rtn_list
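# Hedged example (comment only, illustrative, since this sits inside the class body): with
# relative_freq the value range is cut into equal-width bins weighted by the pattern, e.g.
#   self._get_number(from_value=0, to_value=100, relative_freq=[1, 3, 1], size=100, seed=31)
# draws roughly 20 values from [0, 33), 60 from [33, 66) and 20 from [66, 100).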
def _get_category(self, selection: list, relative_freq: list=None, size: int=None, at_most: int=None,
seed: int=None) -> list:
""" returns a category from a list. Of particular not is the at_least parameter that allows you to
control the number of times a selection can be chosen.
:param selection: a list of items to select from
:param relative_freq: a weighting pattern that does not have to add to 1
:param size: an optional size of the return. default to 1
:param at_most: the most times a selection should be chosen
:param seed: a seed value for the random function: default to None
:return: an item or list of items chosen from the list
"""
if not isinstance(selection, list) or len(selection) == 0:
return [None]*size
_seed = self._seed() if seed is None else seed
select_index = self._get_number(len(selection), relative_freq=relative_freq, at_most=at_most, size=size,
seed=_seed)
rtn_list = [selection[i] for i in select_index]
return list(rtn_list)
def _get_datetime(self, start: Any, until: Any, relative_freq: list=None, at_most: int=None, ordered: str=None,
date_format: str=None, as_num: bool=None, ignore_time: bool=None, size: int=None,
seed: int=None, day_first: bool=None, year_first: bool=None) -> list:
""" returns a random date between two date and/or times. weighted patterns can be applied to the overall date
range.
if a signed 'int' type is passed to the start and/or until dates, the inferred date will be the current date
time with the integer being the offset from the current date time in 'days'.
if a dictionary of time delta name values is passed this is treated as a time delta from the start time.
for example if start = 0, until = {days=1, hours=3} the date range will be between now and 1 days and 3 hours
Note: If no patterns are set this will return a linearly random number between the range boundaries.
:param start: the start boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp or int
:param until: up until boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp, pd.delta, int
:param relative_freq: (optional) A pattern across the whole date range.
:param at_most: the most times a selection should be chosen
:param ordered: order the data ascending 'asc' or descending 'des'
:param ignore_time: ignore time elements and only select from Year, Month, Day elements. Default is False
:param date_format: the string format of the date to be returned. if not set then pd.Timestamp returned
:param as_num: returns a list of Matplotlib date values as a float. Default is False
:param size: the size of the sample to return. Default to 1
:param seed: a seed value for the random function: default to None
:param year_first: specifies if to parse with the year first
If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
If both day_first and year_first are True, year_first takes precedence (same as dateutil).
:param day_first: specifies if to parse with the day first
If True, parses dates with the day first, eg %d-%m-%Y.
If False, defaults to the preferred convention, normally %m-%d-%Y (but not strict)
:return: a date or size of dates in the format given.
"""
# pre check
if start is None or until is None:
raise ValueError("The start or until parameters cannot be of NoneType")
# Code block for intent
as_num = False if not isinstance(as_num, bool) else as_num
ignore_time = False if not isinstance(ignore_time, bool) else ignore_time
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
if isinstance(start, int):
start = (pd.Timestamp.now() + pd.Timedelta(days=start))
if isinstance(until, int):
until = (pd.Timestamp.now() + pd.Timedelta(days=until))
if isinstance(until, dict):
until = (start + pd.Timedelta(**until))
if start == until:
rtn_list = [self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]] * size
else:
_dt_start = self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]
_dt_until = self._convert_date2value(until, day_first=day_first, year_first=year_first)[0]
precision = 15
if ignore_time:
_dt_start = int(_dt_start)
_dt_until = int(_dt_until)
precision = 0
rtn_list = self._get_number(from_value=_dt_start, to_value=_dt_until, relative_freq=relative_freq,
at_most=at_most, ordered=ordered, precision=precision, size=size, seed=seed)
if not as_num:
rtn_list = mdates.num2date(rtn_list)
if isinstance(date_format, str):
rtn_list = pd.Series(rtn_list).dt.strftime(date_format).to_list()
else:
rtn_list = pd.Series(rtn_list).dt.tz_convert(None).to_list()
return rtn_list
def _get_intervals(self, intervals: list, relative_freq: list=None, precision: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number based on a list selection of tuple(lower, upper) interval
:param intervals: a list of unique tuple pairs representing the interval lower and upper boundaries
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
# Code block for intent
size = 1 if size is None else size
if not isinstance(precision, int):
precision = 0 if all(isinstance(v[0], int) and isinstance(v[1], int) for v in intervals) else 3
_seed = self._seed() if seed is None else seed
if not all(isinstance(value, tuple) for value in intervals):
raise ValueError("The intervals list must be a list of tuples")
interval_list = self._get_category(selection=intervals, relative_freq=relative_freq, size=size, seed=_seed)
interval_counts = pd.Series(interval_list, dtype='object').value_counts()
rtn_list = []
for index in interval_counts.index:
size = interval_counts[index]
if size == 0:
continue
if len(index) == 2:
(lower, upper) = index
if index == 0:
closed = 'both'
else:
closed = 'right'
else:
(lower, upper, closed) = index
if lower == upper:
rtn_list += [round(lower, precision)] * size
continue
if precision == 0:
margin = 1
else:
margin = 10**(((-1)*precision)-1)
if str.lower(closed) == 'neither':
lower += margin
upper -= margin
elif str.lower(closed) == 'right':
lower += margin
elif str.lower(closed) == 'both':
upper += margin
# correct adjustments
if lower >= upper:
upper = lower + margin
rtn_list += self._get_number(lower, upper, precision=precision, size=size, seed=_seed)
np.random.default_rng(seed=_seed).shuffle(rtn_list)
return rtn_list
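# Hedged example (comment only, illustrative): intervals=[(0, 10), (10, 20)] with
# relative_freq=[3, 1] draws roughly 75% of the sample from the first interval and 25% from the
# second; a third tuple element such as (0, 10, 'both') controls whether the boundaries are included.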
def _get_dist_normal(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A normal (Gaussian) continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.normal(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_logistic(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A logistic continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.logistic(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_exponential(self, scale: [int, float], size: int=None, seed: int=None) -> list:
"""An exponential continuous random distribution.
:param scale: The scale of the distribution.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.exponential(scale=scale, size=size))
return rtn_list
def _get_dist_gumbel(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""An gumbel continuous random distribution.
The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value Type I) distribution is one of
a class of Generalized Extreme Value (GEV) distributions used in modeling extreme value problems.
The Gumbel is a special case of the Extreme Value Type I distribution for maximums from distributions
with “exponential-like” tails.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.gumbel(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_binomial(self, trials: int, probability: float, size: int=None, seed: int=None) -> list:
"""A binomial discrete random distribution. The Binomial Distribution represents the number of
successes and failures in n independent Bernoulli trials for some given value of n
:param trials: the number of trials to attempt, must be >= 0.
:param probability: the probability distribution, >= 0 and <=1.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.binomial(n=trials, p=probability, size=size))
return rtn_list
def _get_dist_poisson(self, interval: float, size: int=None, seed: int=None) -> list:
"""A Poisson discrete random distribution.
The Poisson distribution
.. math:: f(k; \lambda)=\frac{\lambda^k e^{-\lambda}}{k!}
For events with an expected separation :math:`\lambda` the Poisson
distribution :math:`f(k; \lambda)` describes the probability of
:math:`k` events occurring within the observed
interval :math:`\lambda`.
Because the output is limited to the range of the C int64 type, a
ValueError is raised when `lam` is within 10 sigma of the maximum
representable value.
:param interval: Expectation of interval, must be >= 0.
:param size: the size of the sample.
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.poisson(lam=interval, size=size))
return rtn_list
def _get_dist_bernoulli(self, probability: float, size: int=None, seed: int=None) -> list:
"""A Bernoulli discrete random distribution using scipy
:param probability: the probability occurrence
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
rtn_list = list(stats.bernoulli.rvs(p=probability, size=size, random_state=_seed))
return rtn_list
def _get_dist_bounded_normal(self, mean: float, std: float, lower: float, upper: float, precision: int=None,
size: int=None, seed: int=None) -> list:
"""A bounded normal continuous random distribution.
:param mean: the mean of the distribution
:param std: the standard deviation
:param lower: the lower limit of the distribution
:param upper: the upper limit of the distribution
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
precision = precision if isinstance(precision, int) else 3
_seed = self._seed() if seed is None else seed
rtn_list = stats.truncnorm((lower-mean)/std, (upper-mean)/std, loc=mean, scale=std).rvs(size).round(precision)
return rtn_list
def _get_distribution(self, distribution: str, package: str=None, precision: int=None, size: int=None,
seed: int=None, **kwargs) -> list:
"""returns a number based the distribution type.
:param distribution: The string name of the distribution function from numpy random Generator class
:param package: (optional) The name of the package to use, options are 'numpy' (default) and 'scipy'.
:param precision: (optional) the precision of the returned number
:param size: (optional) the size of the sample
:param seed: (optional) a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if precision is None else precision
if isinstance(package, str) and package == 'scipy':
rtn_list = eval(f"stats.{distribution}.rvs(size=size, random_state=_seed, **kwargs)", globals(), locals())
else:
generator = np.random.default_rng(seed=_seed)
rtn_list = eval(f"generator.{distribution}(size=size, **kwargs)", globals(), locals())
rtn_list = list(rtn_list.round(precision))
return rtn_list
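# Hedged example (comment only, illustrative): self._get_distribution(distribution='gamma',
# shape=2.0, size=100) resolves to numpy's Generator.gamma, while package='scipy' routes the same
# name through scipy.stats, e.g. distribution='gamma', package='scipy', a=2.0, size=100.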
def _get_selection(self, canonical: Any, column_header: str, relative_freq: list=None, sample_size: int=None,
selection_size: int=None, size: int=None, at_most: bool=None, shuffle: bool=None,
seed: int=None) -> list:
""" returns a random list of values where the selection of those values is taken from a connector source.
:param canonical: a pd.DataFrame as the reference dataframe
:param column_header: the name of the column header to correlate
:param relative_freq: (optional) a weighting pattern of the final selection
:param selection_size: (optional) the selection to take from the sample size, normally used with shuffle
:param sample_size: (optional) the size of the sample to take from the reference file
:param at_most: (optional) the most times a selection should be chosen
:param shuffle: (optional) if the selection should be shuffled before selection. Default is true
:param size: (optional) size of the return. default to 1
:param seed: (optional) a seed value for the random function: default to None
:return: list
The canonical is normally a connector contract str reference or a set of parameter instructions on how to
generate a pd.Dataframe but can be a pd.DataFrame. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
if isinstance(canonical, dict):
canonical = pd.DataFrame.from_dict(data=canonical)
if column_header not in canonical.columns:
raise ValueError(f"The column '{column_header}' not found in the canonical")
_values = canonical[column_header].iloc[:sample_size]
if isinstance(selection_size, float) and shuffle:
_values = _values.sample(frac=1, random_state=_seed).reset_index(drop=True)
if isinstance(selection_size, int) and 0 < selection_size < _values.size:
_values = _values.iloc[:selection_size]
return self._get_category(selection=_values.to_list(), relative_freq=relative_freq, size=size, at_most=at_most,
seed=_seed)
def _frame_starter(self, canonical: Any, selection: list=None, headers: [str, list]=None, drop: bool=None,
dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None, re_ignore_case: bool=None,
rename_map: dict=None, default_size: int=None, seed: int=None) -> pd.DataFrame:
""" Selects rows and/or columns changing the shape of the DatFrame. This is always run last in a pipeline
Rows are filtered before the column filter so columns can be referenced even though they might not be included
the final column list.
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param rename_map: a from: to dictionary of headers to rename
:param default_size: if the canonical fails return an empty dataframe with the default index size
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starter is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
canonical = self._get_canonical(canonical, size=default_size)
# not used but in place for method consistency
_seed = self._seed() if seed is None else seed
if isinstance(selection, list):
selection = deepcopy(selection)
# run the select logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
canonical = canonical.iloc[select_idx].reset_index(drop=True)
drop = drop if isinstance(drop, bool) else False
exclude = exclude if isinstance(exclude, bool) else False
re_ignore_case = re_ignore_case if isinstance(re_ignore_case, bool) else False
rtn_frame = Commons.filter_columns(canonical, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case)
if isinstance(rename_map, dict):
rtn_frame.rename(mapper=rename_map, axis='columns', inplace=True)
return rtn_frame
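# Illustrative sketch (not part of the original source): how a selection plus a column filter might be
# passed to _frame_starter. The instance name `builder` and the DataFrame `df` with 'gender', 'age' and
# 'income' columns are hypothetical placeholders.
#
#   selection = [builder.select2dict(column='gender', condition="=='M'"),
#                builder.select2dict(column='age', condition=">65", logic='AND')]
#   result = builder._frame_starter(df, selection=selection, headers=['gender', 'age', 'income'],
#                                   rename_map={'income': 'annual_income'})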
def _frame_selection(self, canonical: Any, selection: list=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, seed: int=None) -> pd.DataFrame:
""" This method always runs at the start of the pipeline, taking a direct or generated pd.DataFrame,
see context notes below, as the foundation canonical of all subsequent steps of the pipeline.
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
return self._frame_starter(canonical=canonical, selection=selection, headers=headers, drop=drop, dtype=dtype,
exclude=exclude, regex=regex, re_ignore_case=re_ignore_case, seed=seed)
def _model_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom methods, takes code string that when executed changes the the canonical returning
the modified canonical. If the method passes returns a pd.Dataframe this will be returned else the assumption is
the canonical has been changed inplace and thus the modified canonical will be returned
When referencing the canonical in the code_str it should be referenced either by use parameter label 'canonical'
or the short cut '@' symbol. kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
assume canonical['gender'] = ['M', 'F', 'U']
code_str ='''
\n@['new_gender'] = [True if x in $value else False for x in @[$header]]
\n@['value'] = [4, 5, 6]
'''
where kwargs are header="'gender'" and value=['M', 'F']
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
:return: the modified canonical as a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
# exec() always returns None, so capture any rebinding of 'canonical' from the execution namespace;
# in-place mutations of the original canonical are reflected without a rebinding
exec(code_str, globals(), local_kwargs)
df = local_kwargs.get('canonical')
if not isinstance(df, pd.DataFrame):
return canonical
return df
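# Illustrative sketch (not part of the original source): a code_str using the '@' shortcut and a '$' kwarg,
# following the docstring above. `builder` and the 'gender' column are hypothetical placeholders.
#
#   df = builder._model_custom(df, code_str="@['is_female'] = @['gender'] == $target", target="'F'")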
def _model_iterator(self, canonical: Any, marker_col: str=None, starting_frame: str=None, selection: list=None,
default_action: dict=None, iteration_actions: dict=None, iter_start: int=None,
iter_stop: int=None, seed: int=None) -> pd.DataFrame:
""" This method allows one to model repeating data subset that has some form of action applied per iteration.
The optional marker column must be included in order to apply actions or apply an iteration marker
An example of use might be a recommender generator where a cohort of unique users need to be selected, for
different recommendation strategies but users can be repeated across recommendation strategy
:param canonical: a pd.DataFrame as the reference dataframe
:param marker_col: (optional) the marker column name for the action outcome. default is to not include
:param starting_frame: (optional) a str referencing an existing connector contract name as the base DataFrame
:param selection: (optional) a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param default_action: (optional) a default action to take on all iterations. defaults to iteration value
:param iteration_actions: (optional) a dictionary of actions where the key is a specific iteration
:param iter_start: (optional) the start value of the range iteration default is 0
:param iter_stop: (optional) the stop value of the range iteration default is start iteration + 1
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starting_frame can be a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a
set of parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical)
rtn_frame = self._get_canonical(starting_frame)
_seed = self._seed() if seed is None else seed
iter_start = iter_start if isinstance(iter_start, int) else 0
iter_stop = iter_stop if isinstance(iter_stop, int) and iter_stop > iter_start else iter_start + 1
default_action = default_action if isinstance(default_action, dict) else 0
iteration_actions = iteration_actions if isinstance(iteration_actions, dict) else {}
for counter in range(iter_start, iter_stop):
df_count = canonical.copy()
# selection
df_count = self._frame_selection(df_count, selection=selection, seed=_seed)
# actions
if isinstance(marker_col, str):
if counter in iteration_actions.keys():
_action = iteration_actions.get(counter, None)
df_count[marker_col] = self._apply_action(df_count, action=_action, seed=_seed)
else:
default_action = default_action if isinstance(default_action, dict) else counter
df_count[marker_col] = self._apply_action(df_count, action=default_action, seed=_seed)
rtn_frame = pd.concat([rtn_frame, df_count], ignore_index=True)
return rtn_frame
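# Illustrative sketch (not part of the original source): three iterations where the marker column records
# a constant strategy label for iteration 0 and falls back to the iteration number otherwise. `builder`
# and the column names are hypothetical placeholders.
#
#   df = builder._model_iterator(df, marker_col='strategy', iter_stop=3,
#                                iteration_actions={0: builder.action2dict(method='@constant', value='control')})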
def _model_group(self, canonical: Any, headers: [str, list], group_by: [str, list], aggregator: str=None,
list_choice: int=None, list_max: int=None, drop_group_by: bool=False, seed: int=None,
include_weighting: bool=False, freq_precision: int=None, remove_weighting_zeros: bool=False,
remove_aggregated: bool=False) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. in addition the the
standard groupby aggregators there is also 'list' and 'set' that returns an aggregated list or set.
These can be using in conjunction with 'list_choice' and 'list_size' allows control of the return values.
if list_max is set to 1 then a single value is returned rather than a list of size 1.
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the column headers to apply the aggregation too
:param group_by: the column headers to group by
:param aggregator: (optional) the aggregator as a function of Pandas DataFrame 'groupby' or 'list' or 'set'
:param list_choice: (optional) used in conjunction with list or set aggregator to return a random n choice
:param list_max: (optional) used in conjunction with list or set aggregator restricts the list to a n size
:param drop_group_by: (optional) drops the group by headers
:param include_weighting: (optional) include a percentage weighting column for each
:param freq_precision: (optional) a precision for the relative_freq values
:param remove_aggregated: (optional) if used in conjunction with the weighting then drops the aggregator column
:param remove_weighting_zeros: (optional) removes zero values
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
freq_precision = freq_precision if isinstance(freq_precision, int) else 3
aggregator = aggregator if isinstance(aggregator, str) else 'sum'
headers = Commons.list_formatter(headers)
group_by = Commons.list_formatter(group_by)
df_sub = Commons.filter_columns(canonical, headers=headers + group_by).dropna()
if aggregator.startswith('set') or aggregator.startswith('list'):
df_tmp = df_sub.groupby(group_by)[headers[0]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.reset_index()
for idx in range(1, len(headers)):
result = df_sub.groupby(group_by)[headers[idx]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.merge(result, how='left', left_on=group_by, right_index=True)
for idx in range(len(headers)):
header = headers[idx]
if isinstance(list_choice, int):
df_tmp[header] = df_tmp[header].apply(lambda x: generator.choice(x, size=list_choice))
if isinstance(list_max, int):
df_tmp[header] = df_tmp[header].apply(lambda x: x[0] if list_max == 1 else x[:list_max])
df_sub = df_tmp
else:
df_sub = df_sub.groupby(group_by, as_index=False).agg(aggregator)
if include_weighting:
df_sub['sum'] = df_sub.sum(axis=1, numeric_only=True)
total = df_sub['sum'].sum()
df_sub['weighting'] = df_sub['sum'].\
apply(lambda x: round((x / total), freq_precision) if isinstance(x, (int, float)) else 0)
df_sub = df_sub.drop(columns='sum')
if remove_weighting_zeros:
df_sub = df_sub[df_sub['weighting'] > 0]
df_sub = df_sub.sort_values(by='weighting', ascending=False)
if remove_aggregated:
df_sub = df_sub.drop(headers, axis=1)
if drop_group_by:
df_sub = df_sub.drop(columns=group_by, errors='ignore')
return df_sub
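# Illustrative sketch (not part of the original source): aggregating a hypothetical 'amount' column by
# 'customer_id' with the standard 'sum' aggregator, and collecting 'product' values as a capped list.
#
#   totals = builder._model_group(df, headers='amount', group_by='customer_id', aggregator='sum',
#                                 include_weighting=True)
#   baskets = builder._model_group(df, headers='product', group_by='customer_id', aggregator='list',
#                                  list_max=3)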
def _model_merge(self, canonical: Any, other: Any, left_on: str=None, right_on: str=None,
on: str=None, how: str=None, headers: list=None, suffixes: tuple=None, indicator: bool=None,
validate: str=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. The indicator parameter can be
used to mark the merged items.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param left_on: the canonical key column(s) to join on
:param right_on: the merging dataset key column(s) to join on
:param on: if the left and right join have the same header name this can replace left_on and right_on
:param how: (optional) One of 'left', 'right', 'outer', 'inner'. Defaults to inner. See below for more detailed
description of each method.
:param headers: (optional) a filter on the headers included from the right side
:param suffixes: (optional) A tuple of string suffixes to apply to overlapping columns. Defaults ('', '_dup').
:param indicator: (optional) Add a column to the output DataFrame called _merge with information on the source
of each row. _merge is Categorical-type and takes on a value of left_only for observations whose
merge key only appears in 'left' DataFrame or Series, right_only for observations whose merge key
only appears in 'right' DataFrame or Series, and both if the observation’s merge key is found
in both.
:param validate: (optional) validate : string, default None. If specified, checks if merge is of specified type.
“one_to_one” or “1:1”: checks if merge keys are unique in both left and right datasets.
“one_to_many” or “1:m”: checks if merge keys are unique in left dataset.
“many_to_one” or “m:1”: checks if merge keys are unique in right dataset.
“many_to_many” or “m:m”: allowed, but does not result in checks.
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
"""
# Code block for intent
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
how = how if isinstance(how, str) and how in ['left', 'right', 'outer', 'inner'] else 'inner'
indicator = indicator if isinstance(indicator, bool) else False
suffixes = suffixes if isinstance(suffixes, tuple) and len(suffixes) == 2 else ('', '_dup')
# Filter on the columns
if isinstance(headers, list):
headers.append(right_on if isinstance(right_on, str) else on)
other = Commons.filter_columns(other, headers=headers)
df_rtn = pd.merge(left=canonical, right=other, how=how, left_on=left_on, right_on=right_on, on=on,
suffixes=suffixes, indicator=indicator, validate=validate)
return df_rtn
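# Illustrative sketch (not part of the original source): a left merge against a second (hypothetical)
# frame keyed on 'customer_id', keeping only selected right-hand columns and marking the source of rows.
#
#   merged = builder._model_merge(df, other=df_profiles, on='customer_id', how='left',
#                                 headers=['segment', 'region'], indicator=True)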
def _model_concat(self, canonical: Any, other: Any, as_rows: bool=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, shuffle: bool=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param as_rows: (optional) how to concatenate, True adds the connector dataset as rows, False as columns
:param headers: (optional) a filter of headers from the 'other' dataset
:param drop: (optional) to drop or not drop the headers if specified
:param dtype: (optional) a filter on data type for the 'other' dataset. int, float, bool, object
:param exclude: (optional) to exclude or include the data types if specified
:param regex: (optional) a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt'
:param re_ignore_case: (optional) true if the regex should ignore case. Default is False
:param shuffle: (optional) if the rows in the loaded canonical should be shuffled
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
"""
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
shuffle = shuffle if isinstance(shuffle, bool) else False
as_rows = as_rows if isinstance(as_rows, bool) else False
# Filter on the columns
df_rtn = Commons.filter_columns(df=other, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case, copy=False)
if shuffle:
df_rtn = df_rtn.sample(frac=1, random_state=_seed).reset_index(drop=True)
if canonical.shape[0] <= df_rtn.shape[0]:
df_rtn = df_rtn.iloc[:canonical.shape[0]]
axis = 'index' if as_rows else 'columns'
return pd.concat([canonical, df_rtn], axis=axis)
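# Illustrative sketch (not part of the original source): column-wise concatenation of a second
# (hypothetical) frame, shuffled and trimmed to line up with the canonical's row count.
#
#   combined = builder._model_concat(df, other=df_extra, as_rows=False, shuffle=True)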
import glob
import datetime
import os
import pandas as pd
import numpy as np
import re
from tkinter import filedialog
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
# pyinstaller --onefile --noconsole --icon GetCSV.ico Arca_GetCSVConverter_2-0-0.py
#for MMW 18-6 spreadsheets
probCol = False
#infer desktop
desktopPath = os.path.expanduser("~/Desktop/")
filelist=['']
probRecords = []
probColls = []
#filename = r'arms_modsonly_May9.csv'
col_names = ["IslandoraContentModel","BCRDHSimpleObjectPID",'imageLink','filename','directory','childKey','title', 'alternativeTitle', 'creator1', 'creator2','creator3']
col_names += ['corporateCreator1','corporateCreator2','contributor1','contributor2','corporateContributor1','publisher_original','publisher_location']
col_names += ['dateCreated','description','extent','topicalSubject1','topicalSubject2','topicalSubject3','topicalSubject4','topicalSubject5']
col_names += ['geographicSubject1','coordinates','personalSubject1','personalSubject2','corporateSubject1','corporateSubject2', 'dateIssued_start']
col_names += ['dateIssued_end','dateRange', 'frequency','genre','genreAuthority','type','internetMediaType','language1','language2','notes']
col_names += ['accessIdentifier','localIdentifier','ISBN','classification','URI']
col_names += ['source','rights','creativeCommons_URI','rightsStatement_URI','relatedItem_title','relatedItem_PID','recordCreationDate','recordOrigin']
pattern1 = r'^[A-Z][a-z]{2}-\d{2}$' #%b-%Y date (e.g. Jun-17)
pattern2 = r'^\d{2}-\d{2}-[1-2]\d{3}$'
contentModels = {
r"info:fedora/islandora:sp_large_image_cmodel": "Large Image",
r"info:fedora/islandora:sp_basic_image": "Basic Image",
r"info:fedora/islandora:bookCModel": "Book",
r"info:fedora/islandora:newspaperIssueCModel":"Newspaper - issue",
r"info:fedora/islandora:newspaperPageCModel":"Newspaper",
r"info:fedora/islandora:sp_PDF":"PDF",
r"info:fedora/islandora:sp-audioCModel":"Audio",
r"info:fedora/islandora:sp_videoCModel":"Video",
r"info:fedora/islandora:sp_compoundCModel":"Compound",
r"info:fedora/ir:citationCModel":"Citation"
}
def browse_button():
# Allow user to select a directory and store it in global var
# called folder_path1
lbl1['text'] = ""
csvname = filedialog.askopenfilename(initialdir = desktopPath,title = "Select file",filetypes = (("csv files","*.csv"),("all files","*.*")))
if ".csv" not in csvname:
lbl1['text'] = "**Please choose a file with a .csv extension!"
else:
filelist[0] = csvname
lbl1['text'] = csvname
def splitMultiHdgs(hdgs):
if pd.notna(hdgs):
hdgs = hdgs.replace("\\,",";")
hdgs = hdgs.split(",")
newhdgs = []
for hdg in hdgs:
newhdg = hdg.replace(";", ",")
newhdgs.append(newhdg)
return newhdgs
else:
return None
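# Illustrative example (not part of the original source): escaped commas ("\,") mark commas that belong
# inside a single heading, while bare commas separate headings.
#
#   splitMultiHdgs("Smith\\, John,Jones\\, Mary") -> ['Smith, John', 'Jones, Mary']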
def getMultiVals(item, string, df, pd):
hdgs = df.filter(like=string).columns
for hdg in hdgs:
vals = df.at[item.Index,hdg]
if pd.notna(vals):
vals = splitMultiHdgs(vals)
return vals
return None
def convert_date(dt_str, letter_date):
"""
Converts an invalid formatted date into a proper date for ARCA Mods
Correct format: Y-m-d
Fixes:
Incorrect format: d-m-Y
Incorrect format (letter date): m-d e.g. Jun-17
:param dt_str: the date string
:param letter_date: whether the string is a letter date. Letter date is something like Jun-17
:return: the correctly formatted date
"""
if letter_date:
rev_date = datetime.datetime.strptime(dt_str, '%b-%y').strftime('%Y-%m') # convert date to yymm string format
rev_date_pts = rev_date.split("-")
year_num = int(rev_date_pts[0])
if year_num > 1999:
year_num = year_num - 100
year_str = str(year_num)
rev_date_pts[0] = year_str
revised = "-".join(rev_date_pts)
else:
revised = datetime.datetime.strptime(dt_str, '%d-%m-%Y').strftime(
'%Y-%m-%d') # convert date to 'YYYY-MM-DD' string format
return revised
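# Illustrative examples (not part of the original source), following the strptime formats used above:
#
#   convert_date('Jun-17', True)       -> '1917-06'    (two-digit years parsed above 1999 are pushed back a century)
#   convert_date('25-12-1950', False)  -> '1950-12-25'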
def sortValues(lst):
# drop NaN values first (removing items while iterating over a list skips elements), then deduplicate
lst = [item for item in lst if pd.notna(item)]
lst = set(lst)
lst = list(lst)
return lst
def dropNullCols(df):
nullcols = []
for col in df.columns:
notNull = df[col].notna().sum()
if notNull < 1:
nullcols.append(col)
return nullcols
def convert():
probCol = False
df2 = pd.DataFrame(columns = col_names)
df2 = df2.append(pd.Series(dtype="object"), ignore_index=True)
f=filelist[0]
# if not os.path.exists(savePath): #if folder does not exist
# os.makedirs(savePath)
try:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_7')
except UnicodeDecodeError:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_8')
nullcols = dropNullCols(df)
df.drop(nullcols, axis=1, inplace=True)
i = 1
for item in df.itertuples():
#PID
df2.at[i, 'BCRDHSimpleObjectPID'] = item.PID
if 'mods_subject_name_personal_namePart_ms' in df.columns:
pNames = item.mods_subject_name_personal_namePart_ms
#ContentModel
cModel = item.RELS_EXT_hasModel_uri_s
df2.at[i,"IslandoraContentModel"] =contentModels[cModel]
#Local Identifier
if 'mods_identifier_local_ms' in df.columns:
localID = item.mods_identifier_local_ms
if pd.notna(localID) and localID != "None":
df2.at[i,'localIdentifier'] = localID
#Access Identifer
if 'mods_identifier_access_ms' in df.columns:
accessID = item.mods_identifier_access_ms
if pd.notna(accessID):
df2.at[i,'accessIdentifier'] = accessID
#Image Link
# Link to Image
PIDparts = item.PID.split(":")
repo = PIDparts[0] #repository code
num = PIDparts[1] #auto-generated accession number
imageLink = "https://bcrdh.ca/islandora/object/" + repo + "%3A" + num
df2.at[i, 'imageLink'] = imageLink
#Title
if 'mods_titleInfo_title_ms' in df.columns:
title = item.mods_titleInfo_title_ms
if pd.notna(title):
df2.at[i,'title'] = title.replace("\,",",")
#Alternative Title
if "mods_titleInfo_alternative_title_ms" in df.columns:
altTitle = item.mods_titleInfo_alternative_title_ms
if pd.notna(altTitle):
df2.at[i, 'alternativeTitle'] = altTitle.replace("\,",",")
#Date
if "mods_originInfo_dateIssued_ms" in df.columns:
dt = item.mods_originInfo_dateIssued_ms
if pd.notna(dt):
if (re.match(pattern1, dt)): #letter date, i.e. Jun-17
dt = convert_date(dt, True)
elif (re.match(pattern2, dt)): #reverse date
dt = convert_date(dt, False)
df2.at[i,'dateCreated'] = dt
#Date Issued Start
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms' in df.columns:
startDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms
if pd.notna(startDt):
df2.at[i,'dateIssued_start'] = startDt
#Date Issued End
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_end_dateIssued_ms' in df.columns:
endDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_end_dateIssued_ms
if pd.notna(endDt):
df2.at[i,'dateIssued_end'] = endDt
#Publisher
if 'mods_originInfo_publisher_ms' in df.columns:
pub = item.mods_originInfo_publisher_ms
if pd.notna(pub):
df2.at[i, 'publisher_original'] = pub
#Publisher Location
if 'mods_originInfo_place_placeTerm_text_ms' in df.columns:
place = item.mods_originInfo_place_placeTerm_text_ms
if pd.notna(place):
df2.at[i,'publisher_location'] = place
#Frequency (serials only)
if 'mods_originInfo_frequency_ms' in df.columns:
freq = item.mods_originInfo_frequency_ms
if pd.notna(freq):
df2.at[i,'frequency'] = freq
#Extent
if "mods_physicalDescription_extent_ms" in df.columns:
extent = item.mods_physicalDescription_extent_ms
if pd.notna(extent):
extent = extent.replace("\,",",")
df2.at[i, 'extent'] = extent
#Notes
if 'mods_note_ms' in df.columns:
notes = item.mods_note_ms
if pd.notna(notes):
notes = notes.replace("\,",",")
df2.at[i, 'notes'] = notes
#Description/Abstract
if "mods_abstract_ms" in df.columns:
descr = item.mods_abstract_ms
if pd.notna(descr):
#if descr is not None:
df2.at[i, 'description'] = descr.replace("\,",",")
#Personal Creators & Contributors
if 'mods_name_personal_namePart_ms' in df.columns:
names = item.mods_name_personal_namePart_ms
if pd.notna(names):
names = splitMultiHdgs(names)
roles = getMultiVals(item,"personal_role",df,pd)
if roles is None or len(roles) == 0:
pass
else:
creatorCount = 0
contribCount = 0
for x in range(len(names)):
if roles[x] == "creator":
creatorCount = creatorCount + 1
hdg = "creator" + str(creatorCount)
df2.at[i,hdg] = names[x].strip()
else:
contribCount = contribCount + 1
hdg = "contributor" + str(contribCount)
df2.at[i,hdg] = names[x].strip()
#Corporate Creators and Contributors
if 'mods_name_corporate_namePart_ms' in df.columns:
corpNames = item.mods_name_corporate_namePart_ms
if pd.notna(corpNames):
creatorCount = 0
contribCount = 0
if 'mods_name_corporate_role_roleTerm_ms' in df.columns:
corpRoles = item.mods_name_corporate_role_roleTerm_ms
if pd.notna(corpRoles):
corpRoles = corpRoles.split(",")
else:
corpRoles = np.nan
corpNames = splitMultiHdgs(corpNames)
count = 0
for corpName in corpNames:
# corpRoles is either a list of roles parallel to corpNames or NaN when no role column is present
if (not isinstance(corpRoles, list)) or corpRoles[count] == 'creator':
creatorCount = creatorCount + 1
hdg = "corporateCreator" + str(creatorCount)
df2.at[i,hdg] = corpName
else:
contribCount = contribCount + 1
hdg = "corporateContributor" + str(contribCount)
df2.at[i,hdg] = corpName
count = count + 1
#topical subjects
if 'mods_subject_topic_ms' in df.columns:
topics = item.mods_subject_topic_ms
if pd.notna(topics):
topics = splitMultiHdgs(topics)
for x in range(len(topics)):
hdg = "topicalSubject" + str(x+1)
df2.at[i, hdg] = topics[x]
#corporate subjects
if 'mods_subject_name_corporate_namePart_ms' in df.columns:
corpSubs = item.mods_subject_name_corporate_namePart_ms
if pd.notna(corpSubs):
corpSubs = splitMultiHdgs(corpSubs)
corpSubs = list(set(corpSubs)) #remove duplicates
for x in range(len(corpSubs)):
hdg = "corporateSubject" + str(x+1)
df2.at[i, hdg] = corpSubs[x]
# #personal subjects
if 'mods_subject_name_personal_namePart_ms' in df.columns:
pnames = item.mods_subject_name_personal_namePart_ms
if pd.notna(pnames):
pnames = splitMultiHdgs(pnames)
for x in range(len(pnames)):
hdg = "personalSubject" + str(x+1)
if pd.notna(pnames[x]):
df2.at[i,hdg] = pnames[x].strip()
#temporal subject (date range)
if 'mods_subject_temporal_ms' in df.columns:
tempSub = item.mods_subject_temporal_ms
if pd.notna(tempSub):
df2.at[i,'dateRange'] = tempSub
#geographic subject
if 'mods_subject_geographic_ms' in df.columns:
geosub = item.mods_subject_geographic_ms
if pd.notna(geosub):
geosubs = splitMultiHdgs(geosub)
for x in range(len(geosubs)):
hdg = "geographicSubject" + str(x+1)
df2.at[i, hdg] = geosubs[x]
#coordinates
if 'mods_subject_geographic_cartographics_ms' in df.columns:
coords = item.mods_subject_geographic_cartographics_ms
if pd.notna(coords):
df2.at[i,"coordinates"] = coords
#classification
if 'mods_classification_authority_lcc_ms' in df.columns:
lcClass = item.mods_classification_authority_lcc_ms
if pd.notna(lcClass):
df2.at[i,'classification'] = lcClass
#isbn
if 'mods_identifier_isbn_ms' in df.columns:
isbn = item.mods_identifier_isbn_ms
if pd.notna(isbn):
df2.at[i,'ISBN'] = isbn
#genre
if 'mods_genre_authority_aat_ms' in df.columns:
genre_aat = item.mods_genre_authority_aat_ms
if pd.notna(genre_aat):
df2.at[i, 'genre'] = genre_aat
df2.at[i, 'genreAuthority'] = "aat"
elif 'mods_genre_authority_marcgt_ms' in df.columns:
if pd.notna(item.mods_genre_authority_marcgt_ms):
df2.at[i, 'genre'] = item.mods_genre_authority_marcgt_ms
df2.at[i, 'genreAuthority'] = "marcgt"
elif 'mods_originInfo_genre_ms' in df.columns:
if pd.notna(item.mods_originInfo_genre_ms):
df2.at[i, 'genre'] = item.mods_originInfo_genre_ms
#type
if 'mods_typeOfResource_ms' in df.columns:
_type = item.mods_typeOfResource_ms
if pd.notna(_type):
df2.at[i, 'type'] = _type
#internet media type
if 'mods_physicalDescription_internetMediaType_ms' in df.columns:
mediaType = item.mods_physicalDescription_internetMediaType_ms
if isinstance (mediaType, str):
df2.at[i, 'internetMediaType'] = mediaType
#Languages
languages = None
langs = getMultiVals(item,"languageTerm",df,pd)
if langs is not None:
for x in range(len(langs)):
lang = langs[x]
hdg = "language" + str(x+1)
if pd.notna(lang):
df2.at[i,hdg] = lang
#Source
if 'mods_location_physicalLocation_ms' in df.columns:
source = item.mods_location_physicalLocation_ms
if pd.notna(source):
df2.at[i, 'source'] = source
#URI
if 'mods_identifier_uri_ms' in df.columns:
uri = item.mods_identifier_uri_ms
if pd.notna(uri):
df2.at[i, 'URI'] = uri
#Rights
if 'mods_accessCondition_use_and_reproduction_ms' in df.columns:
rights = item.mods_accessCondition_use_and_reproduction_ms
if isinstance(rights, str):
rights = splitMultiHdgs(item.mods_accessCondition_use_and_reproduction_ms)
for stmt in rights:
if "Permission to publish" in stmt:
df2.at[i, "rights"] = stmt
elif "rightsstatements" in stmt:
df2.at[i,"rightsStatement_URI"] = stmt
else:
df2.at[i,"creativeCommons_URI"] = stmt
#Related Item
if 'mods_relatedItem_host_titleInfo_title_ms' in df.columns:
coll_title = item.mods_relatedItem_host_titleInfo_title_ms
coll_PID = item.mods_relatedItem_host_identifier_PID_ms
if pd.notna(coll_title):
df2.at[i, "relatedItem_title"] = coll_title
if pd.notna(coll_PID):
df2.at[i, "relatedItem_PID"] = coll_PID
#Record Origin & Creation Date
if 'mods_recordInfo_recordOrigin_ms' in df.columns:
recOrigin = item.mods_recordInfo_recordOrigin_ms
if pd.notna(recOrigin):
from typing import List, Tuple, Dict
import codecs
import json
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
import seaborn as sns
import plotly
import plotly.graph_objs as go
import plotly.figure_factory as ff
import plotly.express as px
from tom_lib.utils import save_topic_number_metrics_data
sns.set(rc={"lines.linewidth": 2})
sns.set_style("whitegrid")
mpl.use("Agg") # To be able to create figures on a headless server (no DISPLAY variable)
def split_string_sep(string: str, sep: str = None):
"""Split a string on a separator (space by default) and put it back together with newlines every few words
"""
if sep is None:
sep = ' '
parts = string.split(sep=sep)
string_new = sep.join(parts[:2])
if len(parts) > 2:
string_new = '\n'.join([string_new, sep.join(parts[2:4])])
if len(parts) > 4:
string_new = '\n'.join([string_new, sep.join(parts[4:7])])
if len(parts) > 7:
string_new = '\n'.join([string_new, sep.join(parts[7:])])
return string_new
def split_string_nchar(string: str, nchar: int = None):
"""Split a string into a given number of chunks based on number of characters
"""
if nchar is None:
nchar = 25
return '\n'.join([string[(i * nchar):(i + 1) * nchar] for i in range(int(np.ceil(len(string) / nchar)))])
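# Illustrative examples (not part of the original source) of the two label-wrapping helpers:
#
#   split_string_sep('one two three four')              -> 'one two\nthree four'
#   split_string_nchar('data science topic model', 12)  -> 'data science\n topic model'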
class Visualization:
def __init__(self, topic_model, output_dir=None):
self.topic_model = topic_model
if output_dir is None:
if self.topic_model.trained:
self.output_dir = Path(f'output_{self.topic_model.model_type}_{self.topic_model.nb_topics}_topics')
else:
self.output_dir = Path(f'output_{self.topic_model.model_type}')
else:
if isinstance(output_dir, str):
self.output_dir = Path(output_dir)
elif isinstance(output_dir, Path):
self.output_dir = output_dir
else:
raise TypeError(f"'output_dir' of type {type(output_dir)} not a valid type")
if not self.output_dir.exists():
self.output_dir.mkdir(parents=True, exist_ok=True)
def plot_topic_distribution(self, doc_id, file_name='topic_distribution.png'):
file_path = self.output_dir / file_name
distribution = self.topic_model.topic_distribution_for_document(doc_id)
data_x = range(0, len(distribution))
plt.clf()
plt.xticks(np.arange(0, len(distribution), 1.0))
plt.bar(data_x, distribution, align='center')
plt.title('Topic distribution')
plt.ylabel('probability')
plt.xlabel('topic')
plt.savefig(file_path)
def plot_word_distribution(self, topic_id, nb_words=10, file_name='word_distribution.png'):
file_path = self.output_dir / file_name
data_x = []
data_y = []
distribution = self.topic_model.top_words(topic_id, nb_words)
for weighted_word in distribution:
data_x.append(weighted_word[0])
data_y.append(weighted_word[1])
plt.clf()
plt.bar(range(len(data_x)), data_y, align='center')
plt.xticks(range(len(data_x)), data_x, size='small', rotation='vertical')
plt.title('Word distribution')
plt.ylabel('probability')
plt.xlabel('word')
plt.savefig(file_path)
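# Illustrative sketch (not part of the original source): constructing a Visualization around an already
# trained topic model and writing the two basic distribution plots. `my_topic_model` is a hypothetical
# trained tom_lib model instance.
#
#   viz = Visualization(my_topic_model, output_dir='output_nmf_15_topics')
#   viz.plot_topic_distribution(doc_id=0)
#   viz.plot_word_distribution(topic_id=3, nb_words=15)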
def plot_greene_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
tao: int = 10,
step: int = 5,
top_n_words: int = 10,
sample=0.8,
verbose: int = 0,
nmf_init: str = None,
nmf_solver: str = None,
nmf_beta_loss: str = None,
nmf_max_iter: int = None,
nmf_alpha: float = None,
nmf_l1_ratio: float = None,
nmf_shuffle: bool = None,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
greene_stability = self.topic_model.greene_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
top_n_words=top_n_words,
tao=tao,
sample=sample,
verbose=verbose,
nmf_init=nmf_init,
nmf_solver=nmf_solver,
nmf_beta_loss=nmf_beta_loss,
nmf_max_iter=nmf_max_iter,
nmf_alpha=nmf_alpha,
nmf_l1_ratio=nmf_l1_ratio,
nmf_shuffle=nmf_shuffle,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
plt.clf()
plt.plot(num_topics_infer, greene_stability, 'o-')
plt.xticks(num_topics_infer)
plt.title('Greene et al. metric (higher is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Stability')
# find and annotate the maximum point on the plot
ymax = max(greene_stability)
xpos = greene_stability.index(ymax)
best_k = num_topics_infer[xpos]
plt.annotate(f'k={best_k}', xy=(best_k, ymax), xytext=(best_k, ymax), textcoords='offset points', fontsize=16)
file_path_fig = self.output_dir / 'greene.png'
file_path_data = self.output_dir / 'greene.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data,
range_=(min_num_topics, max_num_topics),
data=greene_stability, step=step, metric_type='greene')
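# Illustrative sketch (not part of the original source): scanning a range of topic counts with the
# stability/divergence metrics defined in this class; each call writes a .png and a .tsv to output_dir.
#
#   viz.plot_greene_metric(min_num_topics=5, max_num_topics=30, step=5, top_n_words=10)
#   viz.plot_arun_metric(min_num_topics=5, max_num_topics=30, step=5, iterations=10)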
def plot_arun_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
step: int = 5,
iterations: int = 10,
verbose: int = 0,
nmf_init: str = None,
nmf_solver: str = None,
nmf_beta_loss: str = None,
nmf_max_iter: int = None,
nmf_alpha: float = None,
nmf_l1_ratio: float = None,
nmf_shuffle: bool = None,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
symmetric_kl_divergence = self.topic_model.arun_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
iterations=iterations,
verbose=verbose,
nmf_init=nmf_init,
nmf_solver=nmf_solver,
nmf_beta_loss=nmf_beta_loss,
nmf_max_iter=nmf_max_iter,
nmf_alpha=nmf_alpha,
nmf_l1_ratio=nmf_l1_ratio,
nmf_shuffle=nmf_shuffle,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
plt.clf()
plt.plot(num_topics_infer, symmetric_kl_divergence, 'o-')
plt.xticks(num_topics_infer)
plt.title('Arun et al. metric (lower is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Symmetric KL Divergence')
# find and annotate the maximum point on the plot
ymin = min(symmetric_kl_divergence)
xpos = symmetric_kl_divergence.index(ymin)
best_k = num_topics_infer[xpos]
plt.annotate(f'k={best_k}', xy=(best_k, ymin), xytext=(best_k, ymin), textcoords='offset points', fontsize=16)
file_path_fig = self.output_dir / 'arun.png'
file_path_data = self.output_dir / 'arun.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data,
range_=(min_num_topics, max_num_topics),
data=symmetric_kl_divergence, step=step, metric_type='arun')
def plot_brunet_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
step: int = 5,
iterations: int = 10,
verbose: int = 0,
nmf_init: str = None,
nmf_solver: str = None,
nmf_beta_loss: str = None,
nmf_max_iter: int = None,
nmf_alpha: float = None,
nmf_l1_ratio: float = None,
nmf_shuffle: bool = None,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
cophenetic_correlation = self.topic_model.brunet_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
iterations=iterations,
verbose=verbose,
nmf_init=nmf_init,
nmf_solver=nmf_solver,
nmf_beta_loss=nmf_beta_loss,
nmf_max_iter=nmf_max_iter,
nmf_alpha=nmf_alpha,
nmf_l1_ratio=nmf_l1_ratio,
nmf_shuffle=nmf_shuffle,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
plt.clf()
plt.plot(num_topics_infer, cophenetic_correlation, 'o-')
plt.xticks(num_topics_infer)
plt.title('Brunet et al. metric (higher is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Cophenetic correlation coefficient')
# find and annotate the maximum point on the plot
ymax = max(cophenetic_correlation)
xpos = cophenetic_correlation.index(ymax)
best_k = num_topics_infer[xpos]
plt.annotate(f'k={best_k}', xy=(best_k, ymax), xytext=(best_k, ymax), textcoords='offset points', fontsize=16)
file_path_fig = self.output_dir / 'brunet.png'
file_path_data = self.output_dir / 'brunet.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data,
range_=(min_num_topics, max_num_topics),
data=cophenetic_correlation, step=step, metric_type='brunet')
def plot_coherence_w2v_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
step: int = 5,
top_n_words: int = 10,
w2v_size: int = None,
w2v_min_count: int = None,
# w2v_max_vocab_size: int = None,
w2v_max_final_vocab: int = None,
w2v_sg: int = None,
w2v_workers: int = None,
verbose: int = 0,
nmf_init: str = None,
nmf_solver: str = None,
nmf_beta_loss: str = None,
nmf_max_iter: int = None,
nmf_alpha: float = None,
nmf_l1_ratio: float = None,
nmf_shuffle: bool = None,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
coherence = self.topic_model.coherence_w2v_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
top_n_words=top_n_words,
w2v_size=w2v_size,
w2v_min_count=w2v_min_count,
# w2v_max_vocab_size=w2v_max_vocab_size,
w2v_max_final_vocab=w2v_max_final_vocab,
w2v_sg=w2v_sg,
w2v_workers=w2v_workers,
verbose=verbose,
nmf_init=nmf_init,
nmf_solver=nmf_solver,
nmf_beta_loss=nmf_beta_loss,
nmf_max_iter=nmf_max_iter,
nmf_alpha=nmf_alpha,
nmf_l1_ratio=nmf_l1_ratio,
nmf_shuffle=nmf_shuffle,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
plt.clf()
# create the line plot
plt.plot(num_topics_infer, coherence, 'o-')
plt.xticks(num_topics_infer)
plt.title('Coherence-Word2Vec metric (higher is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Mean Coherence')
# find and annotate the maximum point on the plot
ymax = max(coherence)
xpos = coherence.index(ymax)
best_k = num_topics_infer[xpos]
plt.annotate(f'k={best_k}', xy=(best_k, ymax), xytext=(best_k, ymax), textcoords='offset points', fontsize=16)
file_path_fig = self.output_dir / 'coherence_w2v.png'
file_path_data = self.output_dir / 'coherence_w2v.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data,
range_=(min_num_topics, max_num_topics),
data=coherence, step=step, metric_type='coherence_w2v')
def plot_perplexity_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
step: int = 5,
train_size: float = 0.7,
verbose: int = 0,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
train_perplexities, test_perplexities = self.topic_model.perplexity_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
train_size=train_size,
verbose=verbose,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
if (len(train_perplexities) > 0) and (len(test_perplexities) > 0):
plt.clf()
plt.plot(num_topics_infer, train_perplexities, 'o-', label='Train')
plt.plot(num_topics_infer, test_perplexities, 'o-', label='Test')
plt.xticks(num_topics_infer)
plt.title('Perplexity metric (lower is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Perplexity')
plt.legend(loc='best')
file_path_fig = self.output_dir / 'perplexity.png'
file_path_data_train = self.output_dir / 'perplexity_train.tsv'
file_path_data_test = self.output_dir / 'perplexity_test.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data_train,
range_=(min_num_topics, max_num_topics),
data=train_perplexities, step=step, metric_type='perplexity')
save_topic_number_metrics_data(
file_path_data_test,
range_=(min_num_topics, max_num_topics),
data=test_perplexities, step=step, metric_type='perplexity')
def topic_cloud(self, file_name='topic_cloud.json'):
file_path = self.output_dir / file_name
json_graph = {}
json_nodes = []
json_links = []
for i in range(self.topic_model.nb_topics):
description = []
for weighted_word in self.topic_model.top_words(i, 5):
description.append(weighted_word[1])
json_nodes.append({'name': f'topic{i}',
'frequency': self.topic_model.topic_frequency(i),
'description': f"Topic {i}: {', '.join(description)}",
'group': i})
json_graph['nodes'] = json_nodes
json_graph['links'] = json_links
with codecs.open(file_path, 'w', encoding='utf-8') as fp:
json.dump(json_graph, fp, indent=4, separators=(',', ': '))
def plot_docs_over_time(
self,
freq: str = '1YS',
count=True,
by_affil: bool = False,
ma_window: int = None,
figsize: Tuple[int, int] = (12, 8),
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot count of documents per frequency window, optionally by affiliation
"""
if by_affil:
groupby = [pd.Grouper(freq=freq), self.topic_model.corpus._affiliation_col]
else:
groupby = [pd.Grouper(freq=freq)]
result_count = self.topic_model.corpus.data_frame.reset_index().set_index(
self.topic_model.corpus._date_col).groupby(
by=groupby).size()
if by_affil:
result_count = result_count.unstack().fillna(0)
if not count:
total_count = self.topic_model.corpus.data_frame.reset_index().set_index(
self.topic_model.corpus._date_col).groupby(
by=[pd.Grouper(freq=freq)]).size()
result_count = result_count.div(total_count, axis=0)
if ma_window:
result_count = result_count.rolling(window=ma_window, min_periods=1, center=True).mean()
fig, ax = plt.subplots(figsize=figsize)
result_count.plot(ax=ax, kind='line')
if count:
title_str = 'Document Counts'
else:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.0%}'))
title_str = 'Percent of Documents'
if by_affil:
ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
title_str += ' Per Affiliation'
title_str += ' Per Year'
ax.set_title(title_str)
fig.autofmt_xdate(bottom=0.2, rotation=30, ha='center')
fig.tight_layout()
if savefig:
if count:
plot_string = 'doc_count'
else:
plot_string = 'doc_percent'
if by_affil:
affil_string = 'affil'
else:
affil_string = 'overall'
if ma_window:
ma_string = f'_{ma_window}_MA'
else:
ma_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{affil_string}{ma_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
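# Illustrative sketch (not part of the original source): yearly document counts split by affiliation,
# smoothed with a 3-period moving average and saved to disk.
#
#   fig, ax, fname = viz.plot_docs_over_time(freq='1YS', count=True, by_affil=True,
#                                            ma_window=3, savefig=True)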
def plot_docs_above_thresh(
self,
topic_cols: List[str] = None,
normalized: bool = True,
thresh: float = 0.5,
kind: str = 'count',
n_words: int = 10,
figsize: Tuple[int, int] = (12, 8),
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot the number of documents associated with each topic, above some threshold
kind = 'count' or 'percent'
"""
fig, ax = plt.subplots(figsize=figsize)
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
result = np.array(
(self.topic_model.topic_distribution_for_document(normalized=normalized) >= thresh).sum(axis=0)
)[0]
if kind == 'count':
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:,.0f}'))
ax.set_ylabel('Count of Documents')
elif kind == 'percent':
result = result / self.topic_model.corpus.size
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.1%}'))
ax.set_ylabel('Percent of Documents')
result = result[[tc in topic_cols for tc in topic_cols_all]]
sns.barplot(x=topic_cols, y=result, ax=ax)
# result = pd.DataFrame(data=result, columns=topic_cols_all)[topic_cols]
# sns.barplot(ax=ax, data=result)
title_str = f'Documents above {thresh} topic loading'
if normalized:
title_str = f'{title_str} ({norm_string})'
title_str = f'{title_str}; {self.topic_model.corpus.size:,} total docs'
ax.set_title(title_str)
fig.autofmt_xdate()
if savefig:
plot_string = 'hist_above_thresh'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}_{kind}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
def plot_heatmap(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
mask_thresh: float = None,
cmap=None,
vmax: float = None,
vmin: float = None,
fmt: str = '.2f',
annot_fontsize: int = 13,
n_words: int = 10,
figsize: Tuple[int, int] = None,
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot a heatmap of topic-topic Pearson correlation coefficient values
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
corr = pd.DataFrame(
data=np.corrcoef(self.topic_model.topic_distribution_for_document(normalized=normalized).T),
columns=topic_cols_all,
index=topic_cols_all,
)
corr = corr.loc[topic_cols, topic_cols]
if rename:
corr = corr.rename(columns=rename, index=rename)
topic_cols = list(rename.values())
if mask_thresh is None:
mask_thresh = 0
if figsize is None:
figsize = (max(25, min(len(topic_cols) // 1.1, 25)), max(15, min(len(topic_cols) // 1.2, 15)))
if cmap is None:
cmap = sns.diverging_palette(220, 10, as_cmap=True)
if vmax is None:
vmax = corr.max().max()
# vmax=0.25
# vmin=-vmax
if vmin is None:
vmin = corr.min().min()
fig, ax = plt.subplots(figsize=figsize)
ax = sns.heatmap(corr, ax=ax, center=0, annot=True, fmt=fmt, annot_kws={'fontsize': annot_fontsize},
vmin=vmin, vmax=vmax,
mask=((corr > -mask_thresh) & (corr < mask_thresh)),
cmap=cmap,
cbar_kws={'label': 'Pearson Correlation Coefficient'},
# square=True,
)
ax.hlines(range(1, corr.shape[0]), *ax.get_xlim(), lw=0.5)
ax.vlines(range(1, corr.shape[1]), *ax.get_ylim(), lw=0.5)
ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha='right', fontsize=18)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=18)
if savefig:
plot_string = 'topic-topic_corr'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
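# Illustrative sketch (not part of the original source): a topic-topic correlation heatmap restricted to
# a few topics, with shorter display names. The label variables are hypothetical and must match the
# top-words labels built inside the method.
#
#   fig, ax, fname = viz.plot_heatmap(topic_cols=[label_a, label_b, label_c],
#                                     rename={label_a: 'Topic A', label_b: 'Topic B', label_c: 'Topic C'},
#                                     mask_thresh=0.1, savefig=True)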
def plot_clustermap(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
mask_thresh: float = None,
cmap=None,
vmax: float = None,
vmin: float = None,
fmt: str = '.2f',
annot_fontsize: int = 13,
n_words: int = 10,
figsize: Tuple[int, int] = None,
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
metric: str = None,
method: str = None,
):
"""Plot a hierarchical clustermap of topic-topic Pearson correlation coefficient values
(computed with np.corrcoef). Plot is made with Seaborn's clustermap.
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
corr = pd.DataFrame(
data=np.corrcoef(self.topic_model.topic_distribution_for_document(normalized=normalized).T),
columns=topic_cols_all,
index=topic_cols_all,
)
corr = corr.loc[topic_cols, topic_cols]
if rename:
corr = corr.rename(columns=rename, index=rename)
topic_cols = list(rename.values())
if mask_thresh is None:
mask_thresh = 0
if figsize is None:
figsize = (max(25, min(len(topic_cols) // 1.1, 25)), max(15, min(len(topic_cols) // 1.2, 15)))
if cmap is None:
cmap = sns.diverging_palette(220, 10, as_cmap=True)
if vmax is None:
vmax = corr.max().max()
# vmax=0.25
# vmin=-vmax
if vmin is None:
vmin = corr.min().min()
if metric is None:
metric = 'euclidean'
# metric = 'correlation'
if method is None:
# method = 'complete'
method = 'average'
# method = 'ward'
g = sns.clustermap(
corr,
center=0, annot=True, fmt=fmt, annot_kws={'fontsize': annot_fontsize},
metric=metric,
method=method,
vmin=vmin, vmax=vmax,
mask=((corr > -mask_thresh) & (corr < mask_thresh)),
cmap=cmap,
figsize=figsize,
cbar_kws={'label': '\n'.join('Pearson Correlation Coefficient'.split())},
)
g.ax_heatmap.hlines(range(1, corr.shape[0]), *g.ax_heatmap.get_xlim(), lw=0.5)
g.ax_heatmap.vlines(range(1, corr.shape[1]), *g.ax_heatmap.get_ylim(), lw=0.5)
g.ax_heatmap.set_xticklabels(g.ax_heatmap.get_xticklabels(), rotation=30, ha='right', fontsize=18)
g.ax_heatmap.set_yticklabels(g.ax_heatmap.get_yticklabels(), fontsize=18)
if savefig:
plot_string = 'topic-topic_corr_grouped'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}'
filename_out_img = f'{filename_out}.{figformat}'
filename_out_data = f'{filename_out}.csv'
# save image to disk
g.savefig(self.output_dir / filename_out_img, dpi=dpi, transparent=False, bbox_inches='tight')
# save values to csv
corr.iloc[g.dendrogram_row.reordered_ind, g.dendrogram_col.reordered_ind].to_csv(self.output_dir / filename_out_data)
plt.close('all')
else:
filename_out_img = None
plt.show()
return g, filename_out_img
def plot_topic_loading_hist(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
bins=None,
ncols: int = None,
n_words: int = 10,
nchar_title: int = None,
figsize_scale: int = None,
figsize: Tuple[int, int] = None,
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot histogram of document loading distributions per topic
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
if normalized:
norm_string = 'normalized'
if bins is None:
bins = np.arange(0, 1.05, 0.05)
else:
norm_string = ''
if bins is None:
bins = 10
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized),
columns=topic_cols_all,
)
_df = _df[topic_cols]
if rename:
_df = _df.rename(columns=rename)
topic_cols = list(rename.values())
if ncols is None:
ncols = 5
if ncols > len(topic_cols):
ncols = len(topic_cols)
nrows = int(np.ceil(len(topic_cols) / ncols))
if figsize_scale is None:
figsize_scale = 3
if figsize is None:
figsize = (ncols * figsize_scale, nrows * figsize_scale)
fig, axes = plt.subplots(
figsize=figsize,
nrows=nrows, ncols=ncols,
sharey=True,
sharex=True,
)
for topic_col, ax in zip(topic_cols, axes.ravel()):
_df[topic_col].plot(ax=ax, kind='hist', bins=bins)
title = split_string_nchar(topic_col, nchar=nchar_title)
ax.set_title(title)
xlabel = 'Topic Loading'
if normalized:
ax.set_xlabel(f'{xlabel}\n({norm_string})')
ax.set_xlim((0, 1))
else:
ax.set_xlabel(xlabel)
# ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:,.0f}'))
# ax.set_yticklabels([f'{int(x):,}' for x in ax.get_yticks().tolist()]);
# show xticklabels on all axes
for topic_col, ax in zip(topic_cols, axes.ravel()):
plt.setp(ax.get_xticklabels(), visible=True)
        # remove unused axes
for i in range(len(topic_cols), nrows * ncols):
axes.ravel()[i].axis('off')
fig.tight_layout()
if savefig:
plot_string = 'topic_loading_hist'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
else:
norm_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, axes, filename_out
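    # Hypothetical usage sketch for the histogram above. The instance name and
    # constructor are assumptions; only the method signature comes from this file:
    #     viz = SomeTopicVisualizer(...)   # however this class is instantiated
    #     fig, axes, fname = viz.plot_topic_loading_hist(
    #         normalized=True, ncols=4, savefig=True, figformat='pdf')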
def plot_topic_loading_boxplot(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
n_words: int = 10,
ylim: Tuple[float, float] = None,
figsize: Tuple[int, int] = (12, 8),
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Marginal distributions of topic loadings
Plot Boxplot of document loading distributions per topic
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
fig, ax = plt.subplots(figsize=figsize)
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.1%}'))
norm_string = 'normalized'
ax.set_ylabel(f'Topic Loading ({norm_string})')
else:
norm_string = ''
ax.set_ylabel('Topic Loading (absolute)')
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized),
columns=topic_cols_all,
)
_df = _df[topic_cols]
if rename:
_df = _df.rename(columns=rename)
topic_cols = list(rename.values())
ax = sns.boxplot(ax=ax, data=_df)
ax.set_title('Topic Loading Distribution (boxplot)')
if ylim:
ax.set_ylim(ylim)
fig.autofmt_xdate()
if savefig:
plot_string = 'topic_loading_boxplot'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
else:
norm_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
def plot_topic_loading_barplot(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
n_words: int = 10,
ylim: Tuple[float, float] = None,
figsize: Tuple[int, int] = (12, 8),
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Marginal distributions of topic loadings
Plot Barplot of document loading distributions per topic
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
fig, ax = plt.subplots(figsize=figsize)
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.1%}'))
norm_string = 'normalized'
ax.set_ylabel(f'Average Topic Loading ({norm_string})')
else:
norm_string = ''
ax.set_ylabel('Average Topic Loading (absolute)')
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized),
columns=topic_cols_all,
)
_df = _df[topic_cols]
if rename:
_df = _df.rename(columns=rename)
topic_cols = list(rename.values())
ax = sns.barplot(ax=ax, data=_df, estimator=np.mean)
ax.set_title('Topic Loading Distribution (barplot; 95% CI of the mean)')
if ylim:
ax.set_ylim(ylim)
fig.autofmt_xdate()
if savefig:
plot_string = 'topic_loading_barplot'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
else:
norm_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
def plot_one_topic_over_time_count(
self,
topic_col: str,
rename: Dict = None,
normalized: bool = True,
thresh: float = 0.1,
freq: str = '1YS',
n_words: int = 10,
):
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
idx = topic_cols_all.index(topic_col)
addtl_cols = [self.topic_model.corpus._date_col]
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized)[:, idx],
columns=[topic_col],
)
if rename:
_df = _df.rename(columns=rename)
            topic_col = rename.get(topic_col, topic_col)
_df = pd.merge(_df, self.topic_model.corpus.data_frame[addtl_cols], left_index=True, right_index=True)
_df = _df.reset_index().set_index(self.topic_model.corpus._date_col)
result = _df[_df[topic_col] >= thresh].groupby(
pd.Grouper(freq=freq))[topic_col].size()
if result.empty:
print(f"No documents >= {thresh}")
fig = None
ax = None
else:
fig, ax = plt.subplots()
result.plot(ax=ax, kind='line', marker='o')
ax.set_title(topic_col)
ylabel = f"# of year's documents >= {thresh}"
if normalized:
# ax.set_ylim((-0.05, 1.05))
ylabel = f"{ylabel}\n({norm_string})"
ax.set_ylabel(ylabel)
ax.set_xlabel("Publication year")
plt.show()
return fig, ax
def plot_one_topic_over_time_percent(
self,
topic_col: str,
rename: Dict = None,
normalized: bool = True,
thresh: float = 0.1,
freq: str = '1YS',
n_words: int = 10,
):
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
idx = topic_cols_all.index(topic_col)
addtl_cols = [self.topic_model.corpus._date_col]
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized)[:, idx],
columns=[topic_col],
)
if rename:
_df = _df.rename(columns=rename)
            topic_col = rename.get(topic_col, topic_col)
_df = pd.merge(_df, self.topic_model.corpus.data_frame[addtl_cols], left_index=True, right_index=True)
_df = _df.reset_index().set_index(self.topic_model.corpus._date_col)
result_total = _df.groupby(pd.Grouper(freq=freq))[topic_col].size()
result_thresh = _df[_df[topic_col] >= thresh].groupby(
pd.Grouper(freq=freq))[topic_col].size()
result = result_thresh / result_total
if result.empty:
print(f"No documents >= {thresh}")
fig = None
ax = None
else:
fig, ax = plt.subplots()
result.plot(ax=ax, kind='line', marker='o')
ax.set_title(topic_col)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.0%}'))
ylabel = f"% of year's documents >= {thresh}"
if normalized:
ylabel = f"{ylabel}\n({norm_string})"
ax.set_ylabel(ylabel)
ax.set_xlabel("Publication year")
plt.show()
return fig, ax
def plot_topic_over_time_count(
self,
topic_cols: List[str] = None,
rename: Dict = None,
merge_topics: Dict = None,
normalized: bool = True,
thresh: float = 0.1,
freq: str = '1YS',
n_words: int = 10,
nchar_title: int = None,
ncols: int = None,
ma_window: int = None,
by_affil: bool = False,
figsize_scale: int = None,
figsize: Tuple[int, int] = None,
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot count of documents >= a given threshold per frequency window
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
addtl_cols = [self.topic_model.corpus._date_col, self.topic_model.corpus._affiliation_col]
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized),
columns=topic_cols_all,
)
_df = _df[topic_cols]
if rename:
_df = _df.rename(columns=rename)
topic_cols = list(rename.values())
if merge_topics:
_df_merged = pd.DataFrame(index=_df.index, columns=merge_topics.keys())
for k, v in merge_topics.items():
_df_merged[k] = _df[v].sum(axis=1)
_df = _df_merged
topic_cols = list(merge_topics.keys())
_df = pd.merge(_df, self.topic_model.corpus.data_frame[addtl_cols], left_index=True, right_index=True)
_df = _df.reset_index().set_index(self.topic_model.corpus._date_col)
# so all have the same axes
idx = _df.groupby(
by=[pd.Grouper(freq=freq),
self.topic_model.corpus._affiliation_col,
])[topic_cols].size().unstack().index
if by_affil:
groupby = [pd.Grouper(freq=freq), self.topic_model.corpus._affiliation_col]
else:
groupby = [pd.Grouper(freq=freq)]
if ncols is None:
ncols = 5
if ncols > len(topic_cols):
ncols = len(topic_cols)
nrows = int(np.ceil(len(topic_cols) / ncols))
if figsize_scale is None:
figsize_scale = 3
if figsize is None:
figsize = (ncols * figsize_scale, nrows * figsize_scale)
fig, axes = plt.subplots(
figsize=figsize,
nrows=nrows, ncols=ncols,
sharey=True,
sharex=True,
)
for topic_col, ax in zip(topic_cols, axes.ravel()):
result_thresh = _df[_df[topic_col] >= thresh].groupby(
by=groupby)[topic_col].size()
result = pd.DataFrame(index=idx)
if by_affil:
result = result.merge(result_thresh.unstack(), how='outer',
left_index=True, right_index=True).fillna(0)
else:
result = result.merge(result_thresh, how='outer',
left_index=True, right_index=True).fillna(0)
if ma_window:
result = result.rolling(window=ma_window, min_periods=1, center=True).mean()
result.plot(ax=ax, kind='line', marker='', legend=None)
title = split_string_nchar(topic_col, nchar=nchar_title)
ax.set_title(title)
ylabel = f"# of year's documents >= {thresh}"
if normalized:
ylabel = f"{ylabel}\n({norm_string})"
ax.set_ylabel(ylabel)
ax.set_xlabel("Publication year")
# show xticklabels on all axes
for topic_col, ax in zip(topic_cols, axes.ravel()):
plt.setp(ax.get_xticklabels(), visible=True)
        # remove unused axes
for i in range(len(topic_cols), nrows * ncols):
axes.ravel()[i].axis('off')
# for placing the affiliation legend
if by_affil:
handles, labels = ax.get_legend_handles_labels()
bbox_y = 1.0 + ((1.3**(-nrows)) * 0.25)
lgd = fig.legend(handles, labels, bbox_to_anchor=(0.5, bbox_y), loc='upper center')
fig.autofmt_xdate(bottom=0.2, rotation=30, ha='center')
fig.tight_layout()
if savefig:
plot_string = 'topic_count'
if by_affil:
affil_string = 'affil'
else:
affil_string = 'overall'
topics_string = f'{len(topic_cols)}_topics'
thresh_string = f'{int(thresh * 100)}_topicthresh'
if normalized:
norm_string = f'_{norm_string}'
else:
norm_string = ''
if ma_window:
ma_string = f'_{ma_window}_MA'
else:
ma_string = ''
if merge_topics:
merge_string = f'_merged'
else:
merge_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{affil_string}_{topics_string}_{thresh_string}{norm_string}{ma_string}{merge_string}.{figformat}'
# save image to disk
if by_affil:
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight', bbox_extra_artists=(lgd,))
else:
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, axes, filename_out
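    # Hedged usage sketch for the count-over-time plot above, assuming the same
    # hypothetical `viz` instance as in the earlier sketch. The topic labels in
    # `merged` are made up, but the dict shape (new label -> list of existing topic
    # columns to sum) follows the merge logic in the method body:
    #     merged = {'environment': ['water river basin', 'climate warming temperature']}
    #     fig, axes, fname = viz.plot_topic_over_time_count(
    #         merge_topics=merged, thresh=0.1, freq='1YS', ma_window=3, by_affil=False)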
def plot_topic_over_time_percent(
self,
topic_cols: List[str] = None,
rename: Dict = None,
merge_topics: Dict = None,
normalized: bool = True,
thresh: float = 0.1,
freq: str = '1YS',
n_words: int = 10,
nchar_title: int = None,
ncols: int = None,
ma_window: int = None,
by_affil: bool = False,
figsize_scale: int = None,
figsize: Tuple[int, int] = None,
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot the percent of documents above the threshold that are above the threshold for each topic, per year.
Each year across topics adds up to 100%.
One document can contribute to multiple topics.
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
addtl_cols = [self.topic_model.corpus._date_col, self.topic_model.corpus._affiliation_col]
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized),
columns=topic_cols_all,
)
_df = _df[topic_cols]
if rename:
_df = _df.rename(columns=rename)
topic_cols = list(rename.values())
if merge_topics:
_df_merged = pd.DataFrame(index=_df.index, columns=merge_topics.keys())
for k, v in merge_topics.items():
_df_merged[k] = _df[v].sum(axis=1)
_df = _df_merged
topic_cols = list(merge_topics.keys())
_df = pd.merge(_df, self.topic_model.corpus.data_frame[addtl_cols], left_index=True, right_index=True)
# join the date with boolean >= thresh
if by_affil:
groupby = [pd.Grouper(freq=freq), self.topic_model.corpus._affiliation_col]
result_thresh = _df[[self.topic_model.corpus._date_col, self.topic_model.corpus._affiliation_col]].join(
_df[topic_cols] >= thresh).reset_index().set_index(
self.topic_model.corpus._date_col).groupby(
by=groupby)[topic_cols].sum()
else:
            groupby = [pd.Grouper(freq=freq)]
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(postgres_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(postgres_url: str) -> None:
query = "select MAX(test_int), MIN(test_int) from test_table"
df = read_sql(postgres_url, query,
partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(postgres_url: str) -> None:
query = "select increment(test_int) as test_int from test_table ORDER BY test_int"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
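# The `increment` UDF used above is assumed to already exist in the test database;
# a plausible definition (an assumption, not part of this repo) that matches the
# expected values would be:
#     CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS
#     $$ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql;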
def test_manual_partition(postgres_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(postgres_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 0, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, 5, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "a", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 3.1, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, None, False, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_without_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_without_partition_range(postgres_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="Int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(
["a", "c"], dtype="object"
),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series(
[None, None], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_selection(postgres_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_projection(postgres_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": | pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64") | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.metrics import (confusion_matrix,f1_score,classification_report)
from sklearn.model_selection import (train_test_split, GridSearchCV)
from joblib import dump
from sklearn.preprocessing import (MinMaxScaler, StandardScaler)
from sklearn.neural_network import MLPClassifier as MLP
from tempfile import mkdtemp
from shutil import rmtree
from joblib import Memory
################################################################################
# Best score: 0.8784
# Using the following parameters:
# {'mlp__activation': 'logistic', 'mlp__alpha': 1e-06, 'mlp__hidden_layer_sizes': 11, 'mlp__max_iter': 1500}
# -----------------Scoring Model--------------------
# precision recall f1-score support
# 0 0.89 0.89 0.89 20999
# 1 0.87 0.87 0.87 18379
# accuracy 0.88 39378
# macro avg 0.88 0.88 0.88 39378
# weighted avg 0.88 0.88 0.88 39378
# [[18638 2361]
# [ 2356 16023]]
# function: RUN took 18564.0860s
################################################################################
def NN(df, *args, **kwargs):
unique_test_name = 'MinMaxScaler MLP GridSearchCV Optimised with SMOTE ENN'
# Create a temporary folder to store the transformers of the pipeline
cachedir = mkdtemp()
memory = Memory(location=cachedir, verbose=10)
y = df['QuoteConversion_Flag'].values
IDs = df.Quote_ID
X = df.drop(['QuoteConversion_Flag', 'Quote_ID'], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
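    # The 'mlp__' prefix follows scikit-learn's '<step_name>__<param>' convention,
    # so GridSearchCV routes these settings to the 'mlp' step of the Pipeline below.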
param_grid = {
'mlp__activation': ["logistic", "relu"],
'mlp__alpha': 10.0 ** -np.arange(1, 8),
'mlp__hidden_layer_sizes': np.arange(5, 12),
'mlp__max_iter': [500,1000,1500],
}
mlp = MLP(random_state=1,solver='adam',learning_rate='adaptive',tol=1e-5)
model_pipe = Pipeline(steps=[('minmax_scaler', MinMaxScaler()), ('mlp', mlp)], memory=memory)
grid = GridSearchCV(model_pipe, param_grid, cv=10, iid=False, n_jobs=-1)
print(unique_test_name)
grid.fit(X_train, y_train)
print("-----------------Best Param Overview--------------------")
print("Best score: %0.4f" % grid.best_score_)
print("Using the following parameters:")
print(grid.best_params_)
prediction = grid.predict(X_test)
print("-----------------Scoring Model--------------------")
print(classification_report(prediction, y_test))
print(confusion_matrix(prediction, y_test), "\n")
    prediction = pd.DataFrame(data=prediction, columns=['QuoteConversion_Flag'])
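# Hedged usage sketch for NN() above. Column names come from the function body;
# the CSV path is a placeholder assumption:
#     df = pd.read_csv('quotes.csv')   # must contain Quote_ID and QuoteConversion_Flag
#     NN(df)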
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
            if value is specified use that if it's a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
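    # For example (a sketch of the helper above, not an added test):
    #     self._construct(4)                           -> random values, shape (4,) * ndim
    #     self._construct(4, value=9, dtype=np.int64)  -> that shape filled with 9
    #     self._construct(4, value='empty')            -> empty object (info-axis kwarg dropped)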
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
        for op in ['min', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
        # Check for stability when it receives a seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
        # Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to the beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
descDs = descD.sort_index() # the index order changes for mixed-types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
| tm._skip_if_no_scipy() | pandas.util.testing._skip_if_no_scipy |
# -*- coding: UTF-8 -*-
"""
Created by louis at 2021/9/13
Description: PyTorch baseline that trains on per-stock book/trade parquet data.
"""
import os
import gc
import glob
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pandas as pd
import time
from itertools import islice
from torch.utils.data import Dataset, DataLoader
from multiprocessing import Pool
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm
import logging
import resource
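# The two lines below raise the soft limit on open file descriptors to 2048 (the
# hard limit is kept); presumably this avoids "Too many open files" errors when
# many parquet files and DataLoader worker processes are open at the same time.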
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
datefmt = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(filename='pytorch-baseline.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s',
datefmt=datefmt, level=logging.DEBUG)
# import tqdm
tqdm.pandas()
import warnings
from multiprocessing import cpu_count
def get_path_dict(f, v):
f_dict = {}
for i in tqdm(v):
fpath = f'{f}/stock_id={i}'
flist = glob.glob(os.path.join(fpath, '*.parquet'))
if len(flist) > 0:
f_dict[i] = flist[0]
return f_dict
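# Hedged usage sketch for get_path_dict (the folder name and stock ids below are
# assumptions for illustration, not values taken from this script):
#   book_dict = get_path_dict('book_train.parquet', [0, 1, 2])
#   # -> {0: 'book_train.parquet/stock_id=0/<part>.parquet', 1: '...', 2: '...'}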
# train_idx, valid_idx = train_test_split(train_ds['row_id'], shuffle=True, test_size=0.1, random_state=SEED)
# ds: the data from train.csv; f_dict: the data (per-stock parquet file paths) from book_train.parquet
def process_optiver_ds(ds, f_dict, skip_cols, t_dict):
x = []
y = []
full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}
full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
for stock_id, stock_fnmame in tqdm(f_dict.items()):
trade_train_ = t_dict.get(stock_id)
trade_train_ = pd.read_parquet(trade_train_)
optiver_ds = | pd.read_parquet(stock_fnmame) | pandas.read_parquet |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
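# Illustrative note (not part of the upstream tests): the priority order implemented
# above is DataFrame > Series > Index, e.g.
#   get_upcast_box(pd.Index, Series([1]))          # -> Series
#   get_upcast_box(Series, DataFrame({'a': [1]}))  # -> DataFrame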
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = | tm.box_expected(expected, box) | pandas.util.testing.box_expected |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
        # tz mismatch between tz-aware Timestamps raises ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
            # passing tz should result in a DatetimeIndex, then the mismatch raises ValueError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
        expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="UTC")
        tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, date_range, start='2011-01-01',
freq='b')
pytest.raises(ValueError, date_range, end='2011-01-01',
freq='B')
pytest.raises(ValueError, date_range, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
        tm.assert_index_equal(idx, expected)
from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.computation.check import _NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != "pandas":
pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
def setup_method(self, method):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="python")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, self.expected2, check_names=False)
else:
with pytest.raises(ImportError):
df.query("A>0", engine="numexpr")
with pytest.raises(ImportError):
df.eval("A+1", engine="numexpr")
class TestDataFrameEval:
# smaller hits python, larger hits numexpr
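    # (numexpr only kicks in above a minimum element count, roughly 10k elements by default,
    # so n=4 stays on the python path while n=4000 crosses over to the numexpr path)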
@pytest.mark.parametrize("n", [4, 4000])
@pytest.mark.parametrize(
"op_str,op,rop",
[
("+", "__add__", "__radd__"),
("-", "__sub__", "__rsub__"),
("*", "__mul__", "__rmul__"),
("/", "__truediv__", "__rtruediv__"),
],
)
def test_ops(self, op_str, op, rop, n):
        # test ops and reversed ops in evaluation
# GH7198
df = DataFrame(1, index=range(n), columns=list("abcd"))
df.iloc[0] = 2
m = df.mean()
base = DataFrame( # noqa
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
expected = eval(f"base {op_str} df")
# ops as strings
result = eval(f"m {op_str} df")
tm.assert_frame_equal(result, expected)
# these are commutative
if op in ["+", "*"]:
result = getattr(df, op)(m)
tm.assert_frame_equal(result, expected)
# these are not
elif op in ["-", "/"]:
result = getattr(df, rop)(m)
tm.assert_frame_equal(result, expected)
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
tm.assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
msg = "expr must be a string to be evaluated"
with pytest.raises(ValueError, match=msg):
df.query(lambda x: x.B == "b")
with pytest.raises(ValueError, match=msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({"A": [1, 2, 3]})
msg = "expr cannot be an empty string"
with pytest.raises(ValueError, match=msg):
df.query("")
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(np.random.randn(10, 2), columns=list("ab"))
dict1 = {"a": 1}
dict2 = {"b": 2}
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b], names=["color", "food"])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(
df.index.get_level_values("color").values, index=index, name="color"
)
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, "rating"]
df = DataFrame(np.random.randn(10, 2), index=index)
res = df.query("rating == 1", parser=parser, engine=engine)
ind = Series(
df.index.get_level_values("rating").values, index=index, name="rating"
)
exp = df[ind == 1]
tm.assert_frame_equal(res, exp)
res = df.query("rating != 1", parser=parser, engine=engine)
ind = Series(
df.index.get_level_values("rating").values, index=index, name="rating"
)
exp = df[ind != 1]
tm.assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
tm.assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
tm.assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = tm.makeCustomDataframe(
10, 3, r_idx_nlevels=2, r_idx_names=["spam", "eggs"]
)
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {
"index": df.index,
"columns": col_series,
"spam": to_series(df.index, "spam"),
"eggs": to_series(df.index, "eggs"),
"C0": col_series,
}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
tm.assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPandas:
@classmethod
def setup_class(cls):
cls.engine = "numexpr"
cls.parser = "pandas"
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query(
"@df.dates1 < 20130101 < @df.dates3", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates2"] = date_range("1/1/2013", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.loc[np.random.rand(n) > 0.5, "dates3"] = pd.NaT
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.set_index("dates1", inplace=True, drop=True)
res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index("dates1", inplace=True, drop=True)
res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d["dates1"] = date_range("1/1/2012", periods=n)
d["dates3"] = date_range("1/1/2014", periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.set_index("dates1", inplace=True, drop=True)
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(
{"dates": date_range("1/1/2012", periods=n), "nondate": np.arange(n)}
)
result = df.query("dates == nondate", parser=parser, engine=engine)
assert len(result) == 0
result = df.query("dates != nondate", parser=parser, engine=engine)
tm.assert_frame_equal(result, df)
for op in ["<", ">", "<=", ">="]:
with pytest.raises(TypeError):
df.query(f"dates {op} nondate", parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": range(10), "+": range(3, 13), "r": range(4, 14)})
with pytest.raises(SyntaxError):
df.query("i - +", engine=engine, parser=parser)
def test_query_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list("ab"))
a, b = 1, 2 # noqa
res = df.query("a > b", engine=engine, parser=parser)
expected = df[df.a > df.b]
tm.assert_frame_equal(res, expected)
res = df.query("@a > b", engine=engine, parser=parser)
expected = df[a > df.b]
tm.assert_frame_equal(res, expected)
# no local variable c
with pytest.raises(
UndefinedVariableError, match="local variable 'c' is not defined"
):
df.query("@a > b > @c", engine=engine, parser=parser)
# no column named 'c'
with pytest.raises(UndefinedVariableError, match="name 'c' is not defined"):
df.query("@a > b > c", engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError, match="name 'sin' is not defined"):
df.query("sin > 5", engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.core.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
df.index.name = "sin"
msg = "Variables in expression.+"
with pytest.raises(NumExprClobberingError, match=msg):
df.query("sin > 5", engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
tm.assert_frame_equal(
df.query("a < b", engine=engine, parser=parser), df[df.a < df.b]
)
tm.assert_frame_equal(
df.query("a + b > b * c", engine=engine, parser=parser),
df[df.a + df.b > df.b * df.c],
)
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(
np.random.randint(10, size=(10, 3)),
index=Index(range(10), name="blob"),
columns=["a", "b", "c"],
)
res = df.query("(blob < 5) & (a < b)", engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
tm.assert_frame_equal(res, expec)
res = df.query("blob < b", engine=engine, parser=parser)
expec = df[df.index < df.b]
tm.assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(
np.random.randint(10, size=(10, 3)),
index=range(10),
columns=["a", "b", "c"],
)
# "index" should refer to the index
res = df.query("index < b", engine=engine, parser=parser)
expec = df[df.index < df.b]
tm.assert_frame_equal(res, expec)
# test against a scalar
res = df.query("index < 5", engine=engine, parser=parser)
expec = df[df.index < 5]
tm.assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query("(@df > 0) & (@df2 > 0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
result = pd.eval("df[df > 0 and df2 > 0]", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
result = pd.eval(
"df[df > 0 and df2 > 0 and df[df > 0] > 0]", engine=engine, parser=parser
)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
tm.assert_frame_equal(result, expected)
result = pd.eval("df[(df>0) & (df2>0)]", engine=engine, parser=parser)
expected = df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.core.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
df.query("df > 0", engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(100, 10), columns=list("abcdefghij"))
b = 1
expect = df[df.a < b]
result = df.query("a < @b", engine=engine, parser=parser)
tm.assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query("a < b", engine=engine, parser=parser)
tm.assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list("abc")
df = DataFrame(np.random.randn(100, len(cols)), columns=cols)
res = df.query(
"a < b < c and a not in b not in c", engine=engine, parser=parser
)
ind = (
(df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
) # noqa
expec = df[ind]
tm.assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name="a")
b = Series(np.random.randint(10, size=15), name="b")
df = DataFrame({"a": a, "b": b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query("b - 1 in a", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name="b")
expected = df.loc[(b - 1).isin(a)]
result = df.query("@b - 1 in a", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1 # noqa
df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
tm.assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list("ab"))
with pytest.raises(
UndefinedVariableError, match="local variable 'c' is not defined"
):
df.query("a == @c", engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1 # noqa
a = np.r_[20:101:20]
df = DataFrame({"index": a, "b": np.random.randn(a.size)})
df.index.name = "index"
result = df.query("index > 5", engine=self.engine, parser=self.parser)
expected = df[df["index"] > 5]
tm.assert_frame_equal(result, expected)
df = DataFrame({"index": a, "b": np.random.randn(a.size)})
result = df.query("ilevel_0 > 5", engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": a, "b": np.random.randn(a.size)})
df.index.name = "a"
result = df.query("a > 5", engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
tm.assert_frame_equal(result, expected)
result = df.query("index > 5", engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({"a": np.random.rand(n), "b": np.random.rand(n)})
        df.loc[::2, "a"] = np.inf
d = {"==": operator.eq, "!=": operator.ne}
for op, f in d.items():
q = f"a {op} inf"
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super().setup_class()
cls.engine = "numexpr"
cls.parser = "python"
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query(
"(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates2"] = date_range("1/1/2013", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.loc[np.random.rand(n) > 0.5, "dates3"] = pd.NaT
res = df.query(
"(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.set_index("dates1", inplace=True, drop=True)
res = df.query(
"(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index("dates1", inplace=True, drop=True)
res = df.query(
"(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.set_index("dates1", inplace=True, drop=True)
with pytest.raises(NotImplementedError):
df.query("index < 20130101 < dates3", engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1 # noqa
result = pd.eval("x + 1", engine=engine, parser=parser)
assert result == 2
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
with pytest.raises(SyntaxError):
df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
df.query("(df>0) & (df2>0)", engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval("df[(df > 0) & (df2 > 0)]", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval(
"df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]", engine=engine, parser=parser
)
tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super().setup_class()
cls.engine = "python"
cls.parser = "pandas"
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
df.index.name = "sin"
expected = df[df.index > 5]
result = df.query("sin > 5", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setup_class(cls):
super().setup_class()
cls.engine = cls.parser = "python"
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
df.index.name = "sin"
expected = df[df.index > 5]
result = df.query("sin > 5", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
class TestDataFrameQueryStrings:
def test_str_query_method(self, parser, engine):
df = DataFrame(np.random.randn(10, 1), columns=["b"])
df["strings"] = Series(list("aabbccddee"))
expect = df[df.strings == "a"]
if parser != "pandas":
col = "strings"
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = "==", "!="
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = f"{lhs} {op} {rhs}"
msg = r"'(Not)?In' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query(
ex,
engine=engine,
parser=parser,
local_dict={"strings": df.strings},
)
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
tm.assert_frame_equal(res, df[df.strings.isin(["a"])])
expect = df[df.strings != "a"]
res = df.query('strings != "a"', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
tm.assert_frame_equal(res, df[~df.strings.isin(["a"])])
def test_str_list_query_method(self, parser, engine):
df = DataFrame(np.random.randn(10, 1), columns=["b"])
df["strings"] = Series(list("aabbccddee"))
expect = df[df.strings.isin(["a", "b"])]
if parser != "pandas":
col = "strings"
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = "==", "!="
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = f"{lhs} {op} {rhs}"
with pytest.raises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
expect = df[~df.strings.isin(["a", "b"])]
res = df.query('strings != ["a", "b"]', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
def test_query_with_string_columns(self, parser, engine):
df = DataFrame(
{
"a": list("aaaabbbbcccc"),
"b": list("aabbccddeeff"),
"c": np.random.randint(5, size=12),
"d": np.random.randint(9, size=12),
}
)
if parser == "pandas":
res = df.query("a in b", parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
tm.assert_frame_equal(res, expec)
res = df.query("a in b and c < d", parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
tm.assert_frame_equal(res, expec)
else:
with pytest.raises(NotImplementedError):
df.query("a in b", parser=parser, engine=engine)
with pytest.raises(NotImplementedError):
df.query("a in b and c < d", parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
df = DataFrame(
{
"a": list("aaaabbbbcccc"),
"b": list("aabbccddeeff"),
"c": np.random.randint(5, size=12),
"d": np.random.randint(9, size=12),
}
)
res = df.query("a == b", parser=parser, engine=engine)
exp = df[df.a == df.b]
tm.assert_frame_equal(res, exp)
res = df.query("a != b", parser=parser, engine=engine)
exp = df[df.a != df.b]
tm.assert_frame_equal(res, exp)
def test_query_with_nested_strings(self, parser, engine):
skip_if_no_pandas_parser(parser)
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(
StringIO(raw), sep=r"\s{2,}", engine="python", parse_dates=["timestamp"]
)
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser, engine=engine)
tm.assert_frame_equal(expected, res)
def test_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = DataFrame({"a": ["a", "b", "test & test"], "b": [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == "test & test"]
tm.assert_frame_equal(res, expec)
def test_query_lex_compare_strings(self, parser, engine):
a = Series(np.random.choice(list("abcde"), 20))
b = Series(np.arange(a.size))
df = DataFrame({"X": a, "Y": b})
ops = {"<": operator.lt, ">": operator.gt, "<=": operator.le, ">=": operator.ge}
for op, func in ops.items():
res = df.query(f'X {op} "d"', engine=engine, parser=parser)
expected = df[func(df.X, "d")]
tm.assert_frame_equal(res, expected)
def test_query_single_element_booleans(self, parser, engine):
columns = "bid", "bidsize", "ask", "asksize"
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query("bid & ask", engine=engine, parser=parser)
expected = df[df.bid & df.ask]
tm.assert_frame_equal(res, expected)
def test_query_string_scalar_variable(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = pd.DataFrame(
{
"Symbol": ["BUD US", "BUD US", "IBM US", "IBM US"],
"Price": [109.70, 109.72, 183.30, 183.35],
}
)
e = df[df.Symbol == "BUD US"]
symb = "BUD US" # noqa
r = df.query("Symbol == @symb", parser=parser, engine=engine)
tm.assert_frame_equal(e, r)
class TestDataFrameEvalWithFrame:
def setup_method(self, method):
self.frame = DataFrame(np.random.randn(10, 3), columns=list("abc"))
def teardown_method(self, method):
del self.frame
def test_simple_expr(self, parser, engine):
res = self.frame.eval("a + b", engine=engine, parser=parser)
expect = self.frame.a + self.frame.b
tm.assert_series_equal(res, expect)
def test_bool_arith_expr(self, parser, engine):
res = self.frame.eval("a[a < 1] + b", engine=engine, parser=parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
tm.assert_series_equal(res, expect)
@pytest.mark.parametrize("op", ["+", "-", "*", "/"])
def test_invalid_type_for_operator_raises(self, parser, engine, op):
df = DataFrame({"a": [1, 2], "b": ["c", "d"]})
msg = r"unsupported operand type\(s\) for .+: '.+' and '.+'"
with pytest.raises(TypeError, match=msg):
df.eval(f"a {op} b", engine=engine, parser=parser)
class TestDataFrameQueryBacktickQuoting:
@pytest.fixture(scope="class")
def df(self):
"""
Yields a dataframe with strings that may or may not need escaping
by backticks. The last two columns cannot be escaped by backticks
and should raise a ValueError.
"""
yield DataFrame(
{
"A": [1, 2, 3],
"B B": [3, 2, 1],
"C C": [4, 5, 6],
"C C": [7, 4, 3],
"C_C": [8, 9, 10],
"D_D D": [11, 1, 101],
"E.E": [6, 3, 5],
"F-F": [8, 1, 10],
"1e1": [2, 4, 8],
"def": [10, 11, 2],
"A (x)": [4, 1, 3],
"B(x)": [1, 1, 5],
"B (x)": [2, 7, 4],
" &^ :!€$?(} > <++*'' ": [2, 5, 6],
"": [10, 11, 1],
" A": [4, 7, 9],
" ": [1, 2, 1],
"it's": [6, 3, 1],
"that's": [9, 1, 8],
"☺": [8, 7, 6],
"foo#bar": [2, 4, 5],
1: [5, 7, 9],
}
)
def test_single_backtick_variable_query(self, df):
res = df.query("1 < `B B`")
expect = df[1 < df["B B"]]
tm.assert_frame_equal(res, expect)
def test_two_backtick_variables_query(self, df):
res = df.query("1 < `B B` and 4 < `C C`")
expect = df[(1 < df["B B"]) & (4 < df["C C"])]
tm.assert_frame_equal(res, expect)
def test_single_backtick_variable_expr(self, df):
res = df.eval("A + `B B`")
expect = df["A"] + df["B B"]
tm.assert_series_equal(res, expect)
def test_two_backtick_variables_expr(self, df):
res = df.eval("`B B` + `C C`")
expect = df["B B"] + df["C C"]
tm.assert_series_equal(res, expect)
def test_already_underscore_variable(self, df):
res = df.eval("`C_C` + A")
expect = df["C_C"] + df["A"]
tm.assert_series_equal(res, expect)
def test_same_name_but_underscores(self, df):
res = df.eval("C_C + `C C`")
expect = df["C_C"] + df["C C"]
tm.assert_series_equal(res, expect)
def test_mixed_underscores_and_spaces(self, df):
res = df.eval("A + `D_D D`")
expect = df["A"] + df["D_D D"]
tm.assert_series_equal(res, expect)
def test_backtick_quote_name_with_no_spaces(self, df):
res = df.eval("A + `C_C`")
expect = df["A"] + df["C_C"]
tm.assert_series_equal(res, expect)
def test_special_characters(self, df):
res = df.eval("`E.E` + `F-F` - A")
expect = df["E.E"] + df["F-F"] - df["A"]
tm.assert_series_equal(res, expect)
def test_start_with_digit(self, df):
res = df.eval("A + `1e1`")
expect = df["A"] + df["1e1"]
tm.assert_series_equal(res, expect)
def test_keyword(self, df):
res = df.eval("A + `def`")
expect = df["A"] + df["def"]
tm.assert_series_equal(res, expect)
def test_unneeded_quoting(self, df):
res = df.query("`A` > 2")
expect = df[df["A"] > 2]
tm.assert_frame_equal(res, expect)
def test_parenthesis(self, df):
res = df.query("`A (x)` > 2")
expect = df[df["A (x)"] > 2]
tm.assert_frame_equal(res, expect)
def test_empty_string(self, df):
res = df.query("`` > 5")
expect = df[df[""] > 5]
tm.assert_frame_equal(res, expect)
def test_multiple_spaces(self, df):
        res = df.query("`C  C` > 5")
        expect = df[df["C  C"] > 5]
tm.assert_frame_equal(res, expect)
def test_start_with_spaces(self, df):
res = df.eval("` A` + ` `")
expect = df[" A"] + df[" "]
        tm.assert_series_equal(res, expect)
import string
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
###############
# for first time use uncomment this
#
# read in data
data = pd.read_csv('VS_Extensions_1week_correct.csv')
# Tweet Analysis -- objective, predict the retweet level of a tweet
import string
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
from sklearn.model_selection import train_test_split
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
# import warnings filter
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
# function to get the vaderSentiment score
def sentiment_analyzer_scores(sentence):
score = analyser.polarity_scores(sentence)
return score
# print("{:-<40} {}".format(sentence, str(score)))
# check if /data directory exists / If not then create "data" directory
data_dir_exists = os.path.isdir('./data')
if not data_dir_exists:
os.mkdir('data')
# read / load the tweet dataset file --- in the real world, the idea is to get the tweet data through the Twitter API
# For my project though, the dataset is already available in CSV format, saved in the "data" directory.
df = pd.read_csv(filepath_or_buffer='data/tweets.csv',
sep=',',
header=0) # header starts in first line
# clean data -- There are 28 columns in this dataset with 6444 rows. Most of the columns are not relevant to my
# analysis. Therefore, let's do some dropping of columns:
# drop columns that we will not use
df.drop(['id', 'is_retweet', 'original_author', 'in_reply_to_screen_name', 'in_reply_to_status_id'
, 'in_reply_to_user_id', 'is_quote_status', 'lang', 'longitude', 'latitude', 'place_id', 'place_full_name'
, 'place_name', 'place_type', 'place_country_code', 'place_country', 'place_contained_within'
, 'place_attributes', 'place_bounding_box', 'source_url', 'truncated', 'entities', 'extended_entities']
, axis=1, inplace=True)
# Now, we have a total of five (5) columns remaining as follows:
# handle -- twitter account (HillaryClinton and @realDonaldTrump)
# text -- the actual tweet
# time -- the date and time of posting
# retweet_count -- total times the tweet was retweeted
# favorite_count -- total times the tweet was tagged as favorite
# Create new columns (feature extract)
# actual date column
df['actual_date'] = df['time'].str[:10]
df['actual_date'] = pd.to_datetime(df['actual_date'], format='%Y/%m/%d')
# -*- coding: utf-8 -*-
"""
scripts to test the repair strategy
"""
__version__ = '1.0'
__author__ = '<NAME>'
import numpy as np
import pandas as pd
import time
import sys
sys.path.append(r'C:\RELAY')
from src.constraints import Constraints
from src.materials import Material
from src.parameters import Parameters
from src.ABD import A_from_lampam, B_from_lampam, D_from_lampam, filter_ABD
from src.excel import autofit_column_widths
from src.excel import delete_file
from src.save_set_up import save_constraints_LAYLA
from src.save_set_up import save_materials
from src.repair_diso_contig import repair_diso_contig
from src.repair_flexural import repair_flexural
from src.lampam_functions import calc_lampam
from src.repair_10_bal import repair_10_bal
from src.repair_10_bal import calc_mini_10
from src.repair_10_bal import is_equal
from src.repair_membrane import repair_membrane
from src.repair_membrane_1_no_ipo import calc_lampamA_ply_queue
from src.pretty_print import print_lampam, print_ss
#==============================================================================
# Input file
#==============================================================================
guidelines = 'none'
n_plies = 150
fibre_angles = 'trad'
fibre_angles = '3060'
fibre_angles = '15'
file_to_open = '/RELAY/pop/'\
+ fibre_angles + '-' + guidelines + '-' + str(n_plies) + 'plies.xlsx'
result_filename = 'repair-' + fibre_angles + '-' + guidelines \
+ '-' + str(n_plies) + 'plies.xlsx'
delete_file(result_filename)
#==============================================================================
# Material properties
#==============================================================================
data = pd.read_excel(
file_to_open, sheet_name='Materials', index_col=0, header=None)
data = data.transpose()
E11 = data.loc[1, 'E11']
E22 = data.loc[1, 'E22']
nu12 = data.loc[1, 'nu12']
G12 = data.loc[1, 'G12']
ply_t = data.loc[1, 'ply thickness']
mat = Material(E11=E11, E22=E22, G12=G12, nu12=nu12, ply_t=ply_t)
#print(data)
#==============================================================================
# Design & manufacturing constraints
#==============================================================================
data = pd.read_excel(
file_to_open, sheet_name='Constraints', index_col=0, header=None)
data = data.transpose()
#print(data)
sym = data.loc[1, 'symmetry']
bal = True
ipo = True
oopo = data.loc[1, 'out-of-plane orthotropy']
dam_tol = data.loc[1, 'damage tolerance']
rule_10_percent = True
percent_0 = 10
percent_90 = 10
percent_45 = 0
percent_135 = 0
percent_45_135 = 10
diso = True
delta_angle = 45
contig = True
n_contig = 4
set_of_angles = np.array(data.loc[1, 'fibre orientations'].split()).astype(int)
constraints = Constraints(
sym=sym,
bal=bal,
ipo=ipo,
oopo=oopo,
dam_tol=dam_tol,
rule_10_percent=rule_10_percent,
percent_0=percent_0,
percent_45=percent_45,
percent_90=percent_90,
percent_135=percent_135,
percent_45_135=percent_45_135,
diso=diso,
contig=contig,
n_contig=n_contig,
delta_angle=delta_angle,
set_of_angles=set_of_angles)
#==============================================================================
# Parameters
#==============================================================================
# lamination parameter weightings during membrane property refinement
in_plane_coeffs = np.array([1, 1, 0, 0])
# percentage of laminate thickness for plies that can be modified during
# the refinement of membrane properties
p_A = 80
# number of plies in the last permutation during repair for disorientation
# and/or contiguity
n_D1 = 6
# number of ply shifts tested at each step of the re-designing process during
# refinement of flexural properties
n_D2 = 10
# number of times the algorithms 1 and 2 are repeated during the flexural
# property refinement
n_D3 = 2
# lamination parameter weightings during flexural property refinement
out_of_plane_coeffs = np.array([1, 1, 1, 0])
table_param = pd.DataFrame()
table_param.loc[0, 'in_plane_coeffs'] \
= ' '.join(np.array(in_plane_coeffs, dtype=str))
table_param.loc[0, 'out_of_plane_coeffs'] \
= ' '.join(np.array(out_of_plane_coeffs, dtype=str))
table_param.loc[0, 'p_A'] = p_A
table_param.loc[0, 'n_D1'] = n_D1
table_param.loc[0, 'n_D2'] = n_D2
table_param.loc[0, 'n_D3'] = n_D3
table_param = table_param.transpose()
parameters = Parameters(
constraints=constraints,
p_A=p_A,
n_D1=n_D1,
n_D2=n_D2,
n_D3=n_D3,
repair_membrane_switch=True,
repair_flexural_switch=True)
#==============================================================================
# Tests
#==============================================================================
table_10_bal = pd.DataFrame()
table_membrane = pd.DataFrame()
table_diso_contig = pd.DataFrame()
#!/bin/env python
# -*coding: UTF-8 -*-
"""
Argo data fetcher for a local copy of GDAC ftp.
This is not intended to be used directly, only by the facade at fetchers.py
Since the GDAC ftp is organised by DAC/WMO folders, we start by implementing the 'float' and 'profile' entry points.
About the index local ftp fetcher:
We have a large index csv file, "ar_index_global_prof.txt", that is about ~200Mb.
For a given request, we need to load/read it
and then apply a filter to select the lines matching the request.
With the current version, a dataframe of the full index is cached,
and then another cached file is created for the result of the filter.
df_full = pd.read_csv("index.txt")
df_small = filter(df_full)
write_on_file(df_small)
I think we can avoid this with a virtual file system
When a request is done, we
"""
import os
from glob import glob
import numpy as np
import pandas as pd
from abc import abstractmethod
import warnings
import getpass
from .proto import ArgoDataFetcherProto
from argopy.errors import NetCDF4FileNotFoundError
from argopy.utilities import list_standard_variables, check_localftp, format_oneline, is_box
from argopy.options import OPTIONS
from argopy.stores import filestore, indexstore, indexfilter_box
from argopy.plotters import open_dashboard
access_points = ['wmo', 'box']
exit_formats = ['xarray']
dataset_ids = ['phy', 'bgc'] # First is default
api_server_check = OPTIONS['local_ftp']
class LocalFTPArgoDataFetcher(ArgoDataFetcherProto):
""" Manage access to Argo data from a local copy of GDAC ftp """
###
# Methods to be customised for a specific request
###
@abstractmethod
def init(self, *args, **kwargs):
""" Initialisation for a specific fetcher """
pass
# @abstractmethod
# def list_argo_files(self, errors: str = 'raise'):
# """ Set the internal list of absolute path of all files to load
# This function must define the attribute: self._list_of_argo_files with a list of path(s)
#
# Parameters
# ----------
# errors: {'raise','ignore'}, optional
# If 'raise' (default), raises a NetCDF4FileNotFoundError error if any of the requested
# files cannot be found. If 'ignore', file not found is skipped when fetching data.
# """
# pass
###
# Methods that must not change
###
def __init__(self,
local_ftp: str = "",
ds: str = "",
cache: bool = False,
cachedir: str = "",
dimension: str = 'point',
errors: str = 'raise',
parallel: bool = False,
parallel_method: str = 'thread',
progress: bool = False,
chunks: str = 'auto',
chunks_maxsize: dict = {},
**kwargs):
""" Init fetcher
Parameters
----------
local_ftp: str (optional)
Path to the local directory where the 'dac' folder is located.
ds: str (optional)
Dataset to load: 'phy' or 'ref' or 'bgc'
errors: str (optional)
If set to 'raise' (default), will raise a NetCDF4FileNotFoundError error if any of the requested
files cannot be found. If set to 'ignore', the file not found is skipped when fetching data.
cache: bool (optional)
Cache data or not (default: False)
cachedir: str (optional)
Path to cache folder
dimension: str
Main dimension of the output dataset. This can be "profile" to retrieve a collection of
profiles, or "point" (default) to have data as a collection of measurements.
            This can be used to optimise performance.
parallel: bool (optional)
Chunk request to use parallel fetching (default: False)
parallel_method: str (optional)
Define the parallelization method: ``thread``, ``process`` or a :class:`dask.distributed.client.Client`.
progress: bool (optional)
Show a progress bar or not when fetching data.
chunks: 'auto' or dict of integers (optional)
Dictionary with request access point as keys and number of chunks to create as values.
Eg:
- ``{'wmo': 10}`` will create a maximum of 10 chunks along WMOs when used with ``Fetch_wmo``.
- ``{'lon': 2}`` will create a maximum of 2 chunks along longitude when used with ``Fetch_box``.
chunks_maxsize: dict (optional)
Dictionary with request access point as keys and chunk size as values (used as maximum values in
'auto' chunking).
Eg: ``{'wmo': 5}`` will create chunks with as many as 5 WMOs each.
"""
self.cache = cache
self.cachedir = cachedir
self.fs = filestore(cache=self.cache, cachedir=self.cachedir)
self.errors = errors
if not isinstance(parallel, bool):
# The parallelization method is passed through the argument 'parallel':
parallel_method = parallel
if parallel in ['thread', 'process']:
parallel = True
if parallel_method not in ["thread", "process"]:
raise ValueError("localftp only support multi-threading and processing ('%s' unknown)" % parallel_method)
self.parallel = parallel
self.parallel_method = parallel_method
self.progress = progress
self.chunks = chunks
self.chunks_maxsize = chunks_maxsize
self.definition = 'Local ftp Argo data fetcher'
self.dataset_id = OPTIONS['dataset'] if ds == '' else ds
self.local_ftp = OPTIONS['local_ftp'] if local_ftp == '' else local_ftp
check_localftp(self.local_ftp, errors='raise') # Validate local_ftp
self.init(**kwargs)
def __repr__(self):
summary = ["<datafetcher.localftp>"]
summary.append("Name: %s" % self.definition)
summary.append("FTP: %s" % self.local_ftp)
summary.append("Domain: %s" % format_oneline(self.cname()))
return '\n'.join(summary)
def cname(self):
""" Return a unique string defining the constraints """
return self._cname()
def get_path(self, wmo: int, cyc: int = None) -> str:
""" Return the absolute path toward the netcdf source file of a given wmo/cyc pair and a dataset
Based on the dataset, the wmo and the cycle requested, return the absolute path toward the file to load.
The file is searched using its expected file name pattern (following GDAC conventions).
If more than one file are found to match the pattern, the first 1 (alphabeticaly) is returned.
If no files match the pattern, the function can raise an error or fail silently and return None.
Parameters
----------
wmo: int
WMO float code
cyc: int, optional
Cycle number (None by default)
Returns
-------
netcdf_file_path : str
"""
# This function will be used whatever the access point, since we are working with a GDAC like set of files
def _filepathpattern(wmo, cyc=None):
""" Return a file path pattern to scan for a given wmo/cyc pair
            Based on the dataset and the cycle number requested, construct the closest file path pattern to load.
            The path is absolute, the pattern can contain '*', and it points to a file, so it has the '.nc' extension
Returns
-------
file_path_pattern : str
"""
if cyc is None:
# Multi-profile file:
# dac/<DacName>/<FloatWmoID>/<FloatWmoID>_<S>prof.nc
if self.dataset_id == 'phy':
return os.path.sep.join([self.local_ftp, "dac", "*", str(wmo), "%i_prof.nc" % wmo])
elif self.dataset_id == 'bgc':
return os.path.sep.join([self.local_ftp, "dac", "*", str(wmo), "%i_Sprof.nc" % wmo])
else:
# Single profile file:
# dac/<DacName>/<FloatWmoID>/profiles/<B/M/S><R/D><FloatWmoID>_<XXX><D>.nc
if cyc < 1000:
return os.path.sep.join([self.local_ftp, "dac", "*", str(wmo), "profiles", "*%i_%0.3d*.nc" % (wmo, cyc)])
else:
return os.path.sep.join([self.local_ftp, "dac", "*", str(wmo), "profiles", "*%i_%0.4d*.nc" % (wmo, cyc)])
pattern = _filepathpattern(wmo, cyc)
lst = sorted(glob(pattern))
# lst = sorted(self.fs.glob(pattern)) # Much slower than the regular glob !
if len(lst) == 1:
return lst[0]
elif len(lst) == 0:
if self.errors == 'raise':
raise NetCDF4FileNotFoundError(pattern)
else:
# Otherwise remain silent/ignore
# todo: should raise a warning instead ?
return None
else:
# warnings.warn("More than one file to load for a single float cycle ! Return the 1st one by default.")
# The choice of the file to load depends on the user mode and dataset requested.
# todo: define a robust choice
if self.dataset_id == 'phy':
if cyc is None:
# Use the synthetic profile:
lst = [file for file in lst if
[file for file in [os.path.split(w)[-1] for w in lst] if file[0] == 'S'][0] in file]
else:
# Use the ascent profile:
lst = [file for file in lst if
[file for file in [os.path.split(w)[-1] for w in lst] if file[-1] != 'D'][0] in file]
elif self.dataset_id == 'bgc':
lst = [file for file in lst if
[file for file in [os.path.split(w)[-1] for w in lst] if file[0] == 'M'][0] in file]
return lst[0]
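# NOTE (illustrative sketch, not part of the original source; the local_ftp
# root below is hypothetical): for the 'phy' dataset and local_ftp set to
# '/data/ARGO/gdac', get_path(6902746) scans
#     /data/ARGO/gdac/dac/*/6902746/6902746_prof.nc
# while get_path(6902746, 34) scans the single-profile pattern
#     /data/ARGO/gdac/dac/*/6902746/profiles/*6902746_034*.nc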
@property
def uri(self):
""" Return the list of files to load
Returns
-------
list(str)
"""
pass
@property
def cachepath(self):
""" Return path to cache file(s) for this request
Returns
-------
list(str)
"""
return [self.fs.cachepath(url) for url in self.uri]
def _preprocess_multiprof(self, ds):
""" Pre-process one Argo multi-profile file as a collection of points
Parameters
----------
ds: :class:`xarray.Dataset`
Dataset to process
Returns
-------
:class:`xarray.Dataset`
"""
# Replace JULD and JULD_QC by TIME and TIME_QC
ds = ds.rename({'JULD': 'TIME', 'JULD_QC': 'TIME_QC', 'JULD_LOCATION': 'TIME_LOCATION'})
ds['TIME'].attrs = {'long_name': 'Datetime (UTC) of the station',
'standard_name': 'time'}
# Cast data types:
ds = ds.argo.cast_types()
# Enforce real pressure resolution: 0.1 db
for vname in ds.data_vars:
if 'PRES' in vname and 'QC' not in vname:
ds[vname].values = np.round(ds[vname].values, 1)
# Remove variables without dimensions:
# todo: We should be able to find a way to keep them somewhere in the data structure
for v in ds.data_vars:
if len(list(ds[v].dims)) == 0:
ds = ds.drop_vars(v)
# print("DIRECTION", np.unique(ds['DIRECTION']))
# print("N_PROF", np.unique(ds['N_PROF']))
ds = ds.argo.profile2point() # Default output is a collection of points along N_POINTS
# print("DIRECTION", np.unique(ds['DIRECTION']))
# Remove netcdf file attributes and replace them with argopy ones:
ds.attrs = {}
if self.dataset_id == 'phy':
ds.attrs['DATA_ID'] = 'ARGO'
if self.dataset_id == 'bgc':
ds.attrs['DATA_ID'] = 'ARGO-BGC'
ds.attrs['DOI'] = 'http://doi.org/10.17882/42182'
ds.attrs['Fetched_from'] = self.local_ftp
ds.attrs['Fetched_by'] = getpass.getuser()
ds.attrs['Fetched_date'] = pd.to_datetime('now').strftime('%Y/%m/%d')
ds.attrs['Fetched_constraints'] = self.cname()
ds.attrs['Fetched_uri'] = ds.encoding['source']
ds = ds[np.sort(ds.data_vars)]
return ds
def to_xarray(self, errors: str = 'ignore'):
""" Load Argo data and return a xarray.Dataset
Returns
-------
:class:`xarray.Dataset`
"""
# Download data:
if not self.parallel:
method = 'sequential'
else:
method = self.parallel_method
# ds = self.fs.open_mfdataset(self.uri,
# method=method,
# concat_dim='N_POINTS',
# concat=True,
# preprocess=self._preprocess_multiprof,
# progress=self.progress,
# errors=errors,
# decode_cf=1, use_cftime=0, mask_and_scale=1, engine='h5netcdf')
ds = self.fs.open_mfdataset(self.uri,
method=method,
concat_dim='N_POINTS',
concat=True,
preprocess=self._preprocess_multiprof,
progress=self.progress,
errors=errors,
decode_cf=1, use_cftime=0, mask_and_scale=1)
# Data post-processing:
ds['N_POINTS'] = np.arange(0, len(ds['N_POINTS'])) # Re-index to avoid duplicate values
ds = ds.set_coords('N_POINTS')
ds = ds.sortby('TIME')
# Remove netcdf file attributes and replace them with simplified argopy ones:
ds.attrs = {}
if self.dataset_id == 'phy':
ds.attrs['DATA_ID'] = 'ARGO'
if self.dataset_id == 'bgc':
ds.attrs['DATA_ID'] = 'ARGO-BGC'
ds.attrs['DOI'] = 'http://doi.org/10.17882/42182'
ds.attrs['Fetched_from'] = self.local_ftp
ds.attrs['Fetched_by'] = getpass.getuser()
ds.attrs['Fetched_date'] = pd.to_datetime('now').strftime('%Y/%m/%d')
from __future__ import division
import pandas as pd
import numpy as np
from functools import wraps, partial
import re
import os
from glob import glob
from math import ceil
from toolz import merge, dissoc
from itertools import count
from operator import getitem
from ..compatibility import BytesIO, unicode, range, apply
from ..utils import textblock, file_size
from .. import array as da
from . import core
from .core import (DataFrame, Series, compute, concat, categorize_block,
tokens, get)
from .shuffle import set_partition
csv_defaults = {'compression': None}
def fill_kwargs(fn, args, kwargs):
""" Read a csv file and fill up kwargs
This normalizes kwargs against a sample file. It does the following:
1. If given a globstring, just use one file
2. Get names from csv file if not given
3. Identify the presence of a header
4. Identify dtypes
5. Establish column names
6. Switch around dtypes and column names if parse_dates is active
Normally ``pd.read_csv`` does this for us. However, ``dd.read_csv`` must be
consistent across multiple files and should not repeat these heuristics for
each one, so we run the pandas inference once, record the results, and then
send a fully explicit kwargs dict to all future calls to ``pd.read_csv``.
Returns
-------
kwargs: dict
keyword arguments to give to pd.read_csv
"""
kwargs = merge(csv_defaults, kwargs)
sample_nrows = kwargs.pop('sample_nrows', 1000)
essentials = ['columns', 'names', 'header', 'parse_dates', 'dtype']
if set(essentials).issubset(kwargs):
return kwargs
# Let pandas infer names/header/dtypes on a small sample of the file (default: 1000 rows)
if '*' in fn:
fn = sorted(glob(fn))[0]
if 'names' not in kwargs:
kwargs['names'] = csv_names(fn, **kwargs)
if 'header' not in kwargs:
kwargs['header'] = infer_header(fn, **kwargs)
if kwargs['header'] is True:
kwargs['header'] = 0
try:
head = pd.read_csv(fn, *args, nrows=sample_nrows, **kwargs)
except StopIteration:
head = pd.read_csv(fn, *args, **kwargs)
if 'parse_dates' not in kwargs:
kwargs['parse_dates'] = [col for col in head.dtypes.index
if np.issubdtype(head.dtypes[col], np.datetime64)]
if 'dtype' not in kwargs:
kwargs['dtype'] = dict(head.dtypes)
for col in kwargs['parse_dates']:
del kwargs['dtype'][col]
kwargs['columns'] = list(head.columns)
return kwargs
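# Hedged usage sketch (not part of the original module): fill_kwargs() is run
# once so that every subsequent pd.read_csv call over chunks/files sees the
# same explicit 'names', 'header', 'dtype', 'parse_dates' and 'columns'.
# The glob string below is hypothetical.
def _example_fill_kwargs():
    """Sketch: normalize read_csv kwargs against a sample of 'accounts.*.csv'."""
    kwargs = fill_kwargs('accounts.*.csv', (), {'sample_nrows': 500})
    return kwargs  # fully explicit kwargs dict, safe to reuse for every chunk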
@wraps(pd.read_csv)
def read_csv(fn, *args, **kwargs):
chunkbytes = kwargs.pop('chunkbytes', 2**25) # 50 MB
categorize = kwargs.pop('categorize', None)
index = kwargs.pop('index', None)
if index and categorize is None:
categorize = True
kwargs = fill_kwargs(fn, args, kwargs)
# Handle glob strings
if '*' in fn:
return concat([read_csv(f, *args, **kwargs) for f in sorted(glob(fn))])
columns = kwargs.pop('columns')
# Chunk sizes and numbers
total_bytes = file_size(fn, kwargs['compression'])
nchunks = int(ceil(total_bytes / chunkbytes))
divisions = [None] * (nchunks + 1)
header = kwargs.pop('header')
first_read_csv = partial(pd.read_csv, *args, header=header,
**dissoc(kwargs, 'compression'))
rest_read_csv = partial(pd.read_csv, *args, header=None,
**dissoc(kwargs, 'compression'))
# Create dask graph
name = 'read-csv' + next(tokens)
dsk = dict(((name, i), (rest_read_csv, (BytesIO,
(textblock, fn,
i*chunkbytes, (i+1) * chunkbytes,
kwargs['compression']))))
for i in range(1, nchunks))
dsk[(name, 0)] = (first_read_csv, (BytesIO,
(textblock, fn, 0, chunkbytes, kwargs['compression'])))
result = DataFrame(dsk, name, columns, divisions)
if categorize or index:
categories, quantiles = categories_and_quantiles(fn, args, kwargs,
index, categorize,
chunkbytes=chunkbytes)
if categorize:
func = partial(categorize_block, categories=categories)
result = result.map_partitions(func, columns=columns)
if index:
result = set_partition(result, index, quantiles)
return result
def infer_header(fn, encoding='utf-8', compression=None, **kwargs):
""" Guess if csv file has a header or not
This uses Pandas to read a sample of the file, then looks at the column
names to see if they are all word-like.
Returns True or False
"""
# See read_csv docs for header for reasoning
try:
df = pd.read_csv(fn, encoding=encoding, compression=compression, nrows=5)
except StopIteration:
df = pd.read_csv(fn, encoding=encoding, compression=compression)
return (len(df) > 0 and
all(re.match(r'^\s*\D\w*\s*$', n) for n in df.columns) and
not all(dt == 'O' for dt in df.dtypes))
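# Hedged usage sketch (not part of the original module): a file whose first
# row parses into word-like column names, with at least one non-object column,
# is reported as having a header. The file name below is hypothetical.
def _example_infer_header(path='example.csv'):
    """Sketch: write a tiny CSV with a header row and apply the heuristic."""
    with open(path, 'w') as f:
        f.write('name,balance\nAlice,100\nBob,200\n')
    return infer_header(path)  # expected: True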
def csv_names(fn, encoding='utf-8', compression=None, names=None,
parse_dates=None, usecols=None, dtype=None, **kwargs):
try:
df = pd.read_csv(fn, encoding=encoding, compression=compression,
names=names, parse_dates=parse_dates, nrows=5, **kwargs)
except StopIteration:
df = pd.read_csv(fn, encoding=encoding, compression=compression,
names=names, parse_dates=parse_dates, **kwargs)
return list(df.columns)
def categories_and_quantiles(fn, args, kwargs, index=None, categorize=None,
chunkbytes=2**26):
"""
Categories of Object columns and quantiles of index column for CSV
Computes both of the following in a single pass
1. The categories for all object dtype columns
2. The quantiles of the index column
Parameters
----------
fn: string
Filename of csv file
args: tuple
arguments to be passed in to read_csv function
kwargs: dict
keyword arguments to pass in to read_csv function
index: string or None
Name of column on which to compute quantiles
categorize: bool
Whether or not to compute categories of Object dtype columns
"""
kwargs = kwargs.copy()
compression = kwargs.get('compression', None)
total_bytes = file_size(fn, compression)
nchunks = int(ceil(total_bytes / chunkbytes))
if infer_header(fn, **kwargs):
kwargs['header'] = 0
one_chunk = pd.read_csv(fn, *args, nrows=100, **kwargs)
if categorize is not False:
category_columns = [c for c in one_chunk.dtypes.index
if one_chunk.dtypes[c] == 'O'
and c not in kwargs.get('parse_dates', ())]
else:
category_columns = []
cols = category_columns + ([index] if index else [])
dtypes = dict((c, one_chunk.dtypes[c]) for c in cols)
d = read_csv(fn, *args, **merge(kwargs,
dict(usecols=cols,
parse_dates=None,
dtype=dtypes)))
categories = [d[c].drop_duplicates() for c in category_columns]
import dask
if index:
quantiles = d[index].quantiles(np.linspace(0, 100, nchunks + 1))
result = compute(quantiles, *categories)
quantiles, categories = result[0].values, result[1:]
else:
categories = compute(*categories)
quantiles = None
categories = dict(zip(category_columns, categories))
return categories, quantiles
def from_array(x, chunksize=50000, columns=None):
""" Read dask Dataframe from any slicable array
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
"""
has_record_dtype = getattr(x.dtype, 'names', None) is not None
if x.ndim > 2:
raise ValueError('from_array does not support arrays with more than 2 dimensions, got'
' array with shape %r' % (x.shape,))
if columns is None:
if has_record_dtype:
columns = tuple(x.dtype.names) # record array has named columns
elif x.ndim == 2:
columns = [str(i) for i in range(x.shape[1])]
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns)
divisions = tuple(range(0, len(x), chunksize))
if divisions[-1] != len(x) - 1:
divisions = divisions + (len(x) - 1,)
name = 'from_array' + next(tokens)
dsk = dict(((name, i), (pd.DataFrame,
(getitem, x,
slice(i * chunksize, (i + 1) * chunksize))))
for i in range(0, int(ceil(len(x) / chunksize))))
return DataFrame(dsk, name, columns, divisions)
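# Hedged usage sketch (not part of the original module): any sliceable object
# with a record dtype (or 2 dimensions) is accepted.
def _example_from_array():
    """Sketch: a 3-partition dask DataFrame from a NumPy record array."""
    x = np.zeros(150000, dtype=[('name', 'O'), ('balance', 'i8')])
    ddf = from_array(x, chunksize=50000)
    return ddf  # columns are taken from the record dtype: ('name', 'balance')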
def from_pandas(data, npartitions):
"""Construct a dask object from a pandas object.
If given a ``pandas.Series`` a ``dask.Series`` will be returned. If given a
``pandas.DataFrame`` a ``dask.DataFrame`` will be returned. All other
pandas objects will raise a ``TypeError``.
Parameters
----------
df : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a dask DataFrame/Series
npartitions : int
The number of partitions of the index to create
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
from_bcolz : Construct a dask.DataFrame from a bcolz ctable
read_csv : Construct a dask.DataFrame from a CSV file
"""
columns = getattr(data, 'columns', getattr(data, 'name', None))
if columns is None and not isinstance(data, pd.Series):
raise TypeError("Input must be a pandas DataFrame or Series")
nrows = len(data)
chunksize = int(ceil(nrows / npartitions))
data = data.sort_index(ascending=True)
divisions = tuple(data.index[i]
for i in range(0, nrows, chunksize))
divisions = divisions + (data.index[-1],)
name = 'from_pandas' + next(tokens)
dsk = dict(((name, i), data.iloc[i * chunksize:(i + 1) * chunksize])
for i in range(npartitions - 1))
dsk[(name, npartitions - 1)] = data.iloc[chunksize*(npartitions - 1):]
return getattr(core, type(data).__name__)(dsk, name, columns, divisions)
def from_bcolz(x, chunksize=None, categorize=True, index=None, **kwargs):
""" Read dask Dataframe from bcolz.ctable
Parameters
----------
x : bcolz.ctable
Input data
chunksize : int (optional)
The size of blocks to pull out from ctable. Ideally as large as can
comfortably fit in memory
categorize : bool (defaults to True)
Automatically categorize all string dtypes
index : string (optional)
Column to make the index
See Also
--------
from_array: more generic function not optimized for bcolz
"""
import dask.array as da
import bcolz
if isinstance(x, (str, unicode)):
x = bcolz.ctable(rootdir=x)
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (np.issubdtype(x.dtype[name], np.string_) or
np.issubdtype(x.dtype[name], np.unicode_) or
np.issubdtype(x.dtype[name], np.object_)):
a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
categories[name] = da.unique(a)
columns = tuple(x.dtype.names)
divisions = (0,) + tuple(range(-1, len(x), chunksize))[1:]
if divisions[-1] != len(x) - 1:
divisions = divisions + (len(x) - 1,)
new_name = 'from_bcolz' + next(tokens)
dsk = dict(((new_name, i),
(dataframe_from_ctable,
x,
(slice(i * chunksize, (i + 1) * chunksize),),
None, categories))
for i in range(0, int(ceil(len(x) / chunksize))))
result = DataFrame(dsk, new_name, columns, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
q = np.linspace(0, 100, len(x) // chunksize + 2)
divisions = da.percentile(a, q).compute()
return set_partition(result, index, divisions, **kwargs)
else:
return result
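# Hedged usage sketch (not part of the original module); requires the optional
# bcolz package, imported lazily so the module still imports without it.
def _example_from_bcolz():
    """Sketch: a small dask DataFrame from an in-memory bcolz ctable."""
    import bcolz
    x = bcolz.ctable([[1, 2, 3, 4], [10., 20., 30., 40.]], names=['a', 'b'])
    return from_bcolz(x, chunksize=2)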
def dataframe_from_ctable(x, slc, columns=None, categories=None):
""" Get DataFrame from bcolz.ctable
Parameters
----------
x: bcolz.ctable
slc: slice
columns: list of column names or None
>>> import bcolz
>>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
>>> dataframe_from_ctable(x, slice(1, 3))
a b
0 2 20
1 3 30
>>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
b
0 20
1 30
>>> dataframe_from_ctable(x, slice(1, 3), columns='b')
0 20
1 30
Name: b, dtype: int64
"""
import bcolz
if columns is not None:
if isinstance(columns, tuple):
columns = list(columns)
x = x[columns]
name = 'from-bcolz' + next(tokens)
if isinstance(x, bcolz.ctable):
chunks = [x[name][slc] for name in x.names]
if categories is not None:
chunks = [pd.Categorical.from_codes(np.searchsorted(categories[name],
chunk),
categories[name], True)
if name in categories else chunk
for name, chunk in zip(x.names, chunks)]
return pd.DataFrame(dict(zip(x.names, chunks)))
elif isinstance(x, bcolz.carray):
chunk = x[slc]
if categories is not None and columns and columns in categories:
chunk = pd.Categorical.from_codes(
np.searchsorted(categories[columns], chunk),
categories[columns], True)
return pd.Series(chunk, name=columns)
import argparse, joblib
import numpy as np
import pandas as pd
# import extra_funcs
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from pandarallel import pandarallel
from sgt import SGT
# set up argument parsing (make sure these match those in config.yml)
parser = argparse.ArgumentParser()
parser.add_argument("--infile", type=str, required=True)
args = parser.parse_args()
# READ DATA
data = pd.read_csv(args.infile)
# data = pd.read_csv("../submission/input.csv")
# embed both protein sequences respectively
sgt_ = SGT(kappa=5,
lengthsensitive=False,
mode='multiprocessing')
ids = ["ab_pair_{}".format(i) for i in range(data.shape[0])]
data["id"] = ids
data["Hchain"] = data["Hchain"].map(list)
data["Lchain"] = data["Hchain"].map(list)
heavy_embedding = sgt_.fit_transform(data[["id", "Hchain"]].rename(columns={"Hchain":"sequence"}))
light_embedding = sgt_.fit_transform(data[["id", "Lchain"]].rename(columns={"Lchain":"sequence"}))
# input_transformed should have 800 cols: 400 from the heavy-chain and 400 from the light-chain embeddings.
input_transformed = pd.concat([heavy_embedding.set_index("id"), light_embedding.set_index("id")], axis=1)
# PREDICT
modelfile = 'src/finalized_model1.sav'
loaded_model = joblib.load(modelfile)
y_pred = loaded_model.predict_proba(input_transformed)
# SAVE PREDICTIONS WITH THE COLUMN NAME prediction IN THE FILE predictions.csv
pd.DataFrame(y_pred[:, 1], columns=['prediction']).to_csv("predictions.csv", index=False)
import shapely
import pandas as pd
import networkx as nx
from tqdm import tqdm
from syspy.spatial import polygons, spatial
from syspy.syspy_utils import neighbors, syscolors
def merge_zonings(background, foreground, min_area_factor=0.01, min_area=None):
back = background.copy()
front = foreground.copy()
stencil = shapely.geometry.MultiPolygon(
list(front['geometry'])
).buffer(1e-9)
back['geometry'] = back['geometry'].apply(lambda g: g.difference(stencil))
back['geometry'] = polygons.biggest_polygons(list(back['geometry']))
back['area'] = [g.area for g in back['geometry']]
min_area = min_area if min_area else back['area'].mean() * min_area_factor
back = back.loc[back['area'] > min_area]
back['id'] = back.index
front['id'] = front.index
back['zoning'] = 'back'
front['zoning'] = 'front'
columns = ['zoning', 'id', 'geometry']
concatenated = pd.concat(
[back[columns], front[columns]]
)
df = concatenated
zones = list(df['geometry'])
clean_zones = polygons.clean_zoning(
zones,
buffer=1e-4,
fill_buffer=2e-3,
fill_gaps=False,
unite_gaps=True
)
df['geometry'] = clean_zones
return df.reset_index(drop=True)
def pool_and_geometries(pool, geometries):
done = []
while len(pool):
# start another snail
done.append(pool[0])
current = geometries[pool[0]]
pool = [p for p in pool if p not in done]
for i in range(len(pool)):
for p in pool:
if geometries[p].intersects(current):
done.append(p)
current = geometries[p]
pool = [p for p in pool if p not in done]
break
return done
def snail_number(zones, center):
distance_series = zones['geometry'].apply(lambda g: center.distance(g))
distance_series.name = 'cluster_distance'
distance_series.sort_values(inplace=True)
geometries = zones['geometry'].to_dict()
pool = list(distance_series.index)
done = pool_and_geometries(pool, geometries)
snail = pd.Series(done)
snail.index.name = 'cluster_snail'
snail.name = 'cluster'
indexed = snail.reset_index().set_index('cluster')['cluster_snail']
return indexed.loc[zones.index] # we use zones.index to sort the result
def cluster_snail_number(zones, n_clusters=20, buffer=1e-6):
# zones can be a series or a list
zones = pd.DataFrame(pd.Series(zones))
df = zones.copy()
# we want the geometries to intersect each other
# the total area of the zoning
union = shapely.geometry.MultiPolygon(
list(df['geometry'])
).buffer(buffer)
center = union.centroid
clusters, cluster_series = spatial.zone_clusters(df, n_clusters=n_clusters)
df['cluster'] = cluster_series
distance_series = clusters['geometry'].apply(lambda g: center.distance(g))
distance_series.name = 'cluster_distance'
distance_series.sort_values(inplace=True)
geometries = clusters['geometry'].to_dict()
snail = snail_number(clusters, center)
clusters['snail'] = snail
df = pd.merge(df, snail.reset_index(), on='cluster')
df['distance'] = df['geometry'].apply(lambda g: center.distance(g))
sorted_df = df.sort_values(by=['cluster_snail', 'distance'])
to_concat = []
for cluster in set(df['cluster']):
proto = sorted_df.copy()
proto = proto.loc[proto['cluster'] == cluster]
geometries = proto['geometry'].apply(
lambda g: g.buffer(buffer)).to_dict()
pool = list(proto.index)
done = pool_and_geometries(pool, geometries)
snail = pd.Series(done)
snail.index.name = 'snail'
snail.name = 'original_index'
proto.index.name = 'original_index'
proto.reset_index(inplace=True)
proto = pd.merge(proto, snail.reset_index(), on='original_index')
to_concat.append(proto)
concat = pd.concat(to_concat)
df = concat.copy()
df.sort_values(by=['cluster_snail', 'snail'], inplace=True)
df.reset_index(inplace=True, drop=True)
df.reset_index(inplace=True, drop=False)
if True:
df.drop('geometry', inplace=True, axis=1)
df = pd.merge(
df,
sorted_df[['geometry']],
left_on='original_index',
right_index=True
)
#
df.set_index('original_index', inplace=True)
return df.loc[zones.index]
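# Hedged usage sketch (not from the original source): given a GeoDataFrame-like
# object `zoning_gdf` (hypothetical name) with a polygon 'geometry' column, the
# zones come back ordered "snail-wise" around the centroid of the whole zoning,
# first by cluster and then within each cluster:
#
#     ordered = cluster_snail_number(zoning_gdf, n_clusters=10)
#     ordered[['cluster_snail', 'snail']].head()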
def greedy_color(zoning, colors=syscolors.rainbow_shades, buffer=1e-6):
zoning = zoning.copy()
zoning['geometry'] = zoning['geometry'].apply(lambda g: g.buffer(buffer))
# TODO change the edge construction to make it independent from neighbors
n = neighbors.neighborhood_dataframe(zoning)
edges = n[['origin', 'destination']].values
g = nx.Graph()
g.add_edges_from(edges)
d = nx.coloring.greedy_color(
g,
strategy=nx.coloring.strategy_largest_first
)
color_list = list(colors)
def index_to_color(index):
return color_list[index]
return pd.Series(d).apply(index_to_color)
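# Hedged usage sketch (not from the original source): colour a zoning so that
# no two touching zones share a colour, assuming `zoning_gdf` (hypothetical
# name) has a polygon 'geometry' column:
#
#     colours = greedy_color(zoning_gdf)   # pandas Series of colours, indexed by zone id
#     # colours can then be passed to a plotting routine, one colour per zone.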
########################################################################
def intersection_area(geoa, geob):
if geoa.intersects(geob):
intersection = geoa.intersection(geob)
return intersection.area
else:
return 0
def intersection_area_matrix(x_geometries, y_geometries):
array = []
for g in tqdm(x_geometries, desc=str(len(y_geometries))):
array.append(
[
intersection_area(y_geometry, g)
for y_geometry in y_geometries
]
)
return array
def intersection_area_dataframe(front, back):
front.index.name = 'front_index'
back.index.name = 'back_index'
ia_matrix = intersection_area_matrix(
list(front['geometry']),
list(back['geometry'])
)
df = pd.DataFrame(ia_matrix)
import sys
sys.path.append("../../dies/")
sys.path.append("../dies/")
import copy
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from dies.data import (
combine_datasets,
create_databunch,
ds_from_df,
split_by_date,
train_test_split_dataset,
train_test_split_dataset_by_n_weeks,
convert_data_to_recurrent_data,
DatasetRecurrentData,
scale_datasets,
)
from dies.utils import listify
from dies.utils_pytorch import dev_to_np, np_to_dev
def get_dataset(file, test_date="2016-01-01", park_id=None, num_samples_per_day=-1):
df = pd.read_csv(file, sep=";")
df.TimeUTC = pd.to_datetime(df.TimeUTC, infer_datetime_format=True, utc=True)
df.set_index("TimeUTC", inplace=True)
cat_cols = ["DayOfYear", "Hour"]
x_cols = [c for c in df.columns if c not in ["PowerGeneration"] + cat_cols]
y_cols = "PowerGeneration"
if park_id is not None:
df["ParkId"] = park_id
cat_cols += ["ParkId"]
if num_samples_per_day != -1:
df = create_consistent_number_of_sampler_per_day(
df, num_samples_per_day=num_samples_per_day
)
ds = ds_from_df(df, x_columns=x_cols, cat_columns=cat_cols, y_columns=y_cols,)
ds_tr, ds_te = split_by_date(ds, test_date)
return ds_tr, ds_te
def create_databunch_mtl_mlp(
files, batch_size=512, device="cpu", test_date="2016-01-01", dim=1, scale_data=False
):
ds_trs, ds_vals, ds_tes = [], [], []
for idx, f in enumerate(files):
ds_tr, ds_te = get_dataset(f, park_id=idx)
ds_tr, ds_val = train_test_split_dataset_by_n_weeks(ds_tr)
ds_trs.append(ds_tr)
ds_vals.append(ds_val)
ds_tes.append(ds_te)
ds_trs = combine_datasets(ds_trs, dim=0)
ds_vals = combine_datasets(ds_vals, dim=0)
ds_tes = combine_datasets(ds_tes, dim=0)
if scale_data:
scale_datasets(ds_trs, [ds_vals, ds_tes], scaler=MinMaxScaler())
data_bunch = create_databunch(
ds_trs, ds_vals, test_ds=ds_tes, batch_size=int(batch_size), device=device
)
return data_bunch
def create_databunch_mtl(
files, batch_size=512, device="cpu", test_date="2016-01-01", dim=1, scale_data=False
):
ds_trs, ds_tes = [], []
for f in files:
ds_tr, ds_te = get_dataset(f)
ds_trs.append(ds_tr)
ds_tes.append(ds_te)
ds_trs = combine_datasets(ds_trs, dim=1)
ds_tes = combine_datasets(ds_tes, dim=1)
ds_trs, ds_vals = train_test_split_dataset_by_n_weeks(ds_trs)
if scale_data:
scale_datasets(ds_trs, [ds_vals, ds_tes], scaler=MinMaxScaler())
data_bunch = create_databunch(
ds_trs, ds_vals, test_ds=ds_tes, batch_size=int(batch_size), device=device
)
return data_bunch
def create_databunch_recurrent_data(
file, config, device, timesteps=24, scale_data=False
):
ds_tr, ds_te = get_dataset(file, num_samples_per_day=timesteps)
ds_tr, ds_val = train_test_split_dataset_by_n_weeks(ds_tr)
if scale_data:
scale_datasets(ds_tr, [ds_val, ds_te], scaler=MinMaxScaler())
ds_tr, ds_val, ds_te = create_recurrent_ds(
[ds_tr, ds_val, ds_te], timesteps=timesteps
)
data_bunch = create_databunch(
ds_tr,
ds_val,
test_ds=ds_te,
batch_size=int(config["batch_size"]),
device=device,
)
return data_bunch
def create_recurrent_ds(datasets, timesteps=24):
datasets = listify(datasets)
new_datasets = []
for ds in datasets:
new_x = np_to_dev(
convert_data_to_recurrent_data(dev_to_np(ds.x), timesteps=timesteps)
)
new_y = convert_data_to_recurrent_data(dev_to_np(ds.y), timesteps=timesteps)
ds = DatasetRecurrentData(new_x, new_y)
new_datasets.append(ds)
return new_datasets
def create_consistent_number_of_sampler_per_day(df, num_samples_per_day=24):
mask = df.resample("D").apply(len).PowerGeneration
mask = (mask < num_samples_per_day) & (mask > 0)
for i in range(len(mask)):
if mask[i]:
new_day = mask.index[i] + pd.DateOffset(days=1)
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 18 15:04:50 2018
@authors: a.pakbin, <NAME>
"""
import numpy as np
from copy import copy
import pandas as pd
pd.set_option('mode.chained_assignment', None)
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import random as rnd
from xgboost.sklearn import XGBClassifier
import sys
import os
import matplotlib.pyplot as plt
import re
def data_reader(data_address, file_name, non_attribute_column_names=None,label_column_name=None):
data=pd.read_csv(data_address+'/'+file_name)
if non_attribute_column_names:
columns_to_drop=list(set(non_attribute_column_names)-set([label_column_name]))
data=data.drop(columns_to_drop, axis=1)
return data
def matrix_partitioner(df, proportion, label=None):
number_of_ones=int(round(proportion*len(df)))
ones=np.ones(number_of_ones)
zeros=np.zeros(len(df)-number_of_ones)
ones_and_zeros=np.append(ones,zeros)
permuted=np.random.permutation(ones_and_zeros)
boolean_permuted=permuted>0
if label:
return [df[boolean_permuted].reset_index(),df[~boolean_permuted].reset_index(),label[boolean_permuted],label[~boolean_permuted]]
else:
return [df[boolean_permuted].reset_index(),df[~boolean_permuted].reset_index()]
def dataframe_partitioner(df, output_label, proportion):
y=df[output_label].values
X=df.drop([output_label], axis=1)
return matrix_partitioner(X,label=y,proportion=proportion)
def one_hot_detacher(X, categorical_column_names):
one_hot_column_names=list()
for categorical_column in categorical_column_names:
for column_name in X.columns:
if column_name.startswith(categorical_column):
one_hot_column_names.append(column_name)
one_hot=X[one_hot_column_names]
X.drop(one_hot_column_names, axis=1, inplace=True)
return [X, one_hot]
def one_hot_attacher(X, one_hot):
return X.join(one_hot)
def normalize(X, data_type, categorical_column_names, training_mean=None, training_std=None):
[X, one_hot]=one_hot_detacher(X, categorical_column_names)
if data_type=='train_set':
mean=np.mean(X,axis=0)
std=np.var(X, axis=0)
elif data_type=='test_set':
mean=training_mean
std=training_std
aux_std=copy(std)
aux_std[aux_std==0]=1
normalized=(X-mean)/aux_std
complete_normalized=one_hot_attacher(normalized, one_hot)
if data_type=='train_set':
return [complete_normalized, mean, std]
elif data_type=='test_set':
return complete_normalized
def train_test_normalizer(X_train, X_test, categorical_column_names):
[X_TRAIN_NORMALIZED, X_TRAIN_MEAN, X_TRAIN_STD]=normalize(X=X_train, data_type='train_set', categorical_column_names=categorical_column_names)
X_TEST_NORMALIZED=normalize(X=X_test, data_type='test_set', categorical_column_names=categorical_column_names, training_mean=X_TRAIN_MEAN, training_std=X_TRAIN_STD)
return [X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]
def possible_values_finder(data, categorical_column_names):
column_dict = dict()
for categorical_column_name in categorical_column_names:
unique_vals = list(set([str(x) for x in data[categorical_column_name].unique()])-set(['nan','NaN','NAN','null']))
column_dict[categorical_column_name]=unique_vals
return column_dict
def one_hot_encoder(X, categorical_column_names, possible_values):
for categorical_column_name in categorical_column_names:
possible_values_ = possible_values[categorical_column_name]
new_vals = [categorical_column_name + '_' + str(s) for s in possible_values_]
dummies = pd.get_dummies(X[categorical_column_name], prefix=categorical_column_name)
dummies = dummies.T.reindex(new_vals).T.fillna(0)
X = X.drop([categorical_column_name], axis=1)
X = X.join(dummies)
return X
def train_test_one_hot_encoder(X_train, X_test, categorical_column_names, possible_values):
X_TRAIN=one_hot_encoder(X_train, categorical_column_names, possible_values)
X_TEST=one_hot_encoder(X_test, categorical_column_names, possible_values)
return [X_TRAIN, X_TEST]
def categorical_distribution_finder(X, categorical_column_names):
NAMES=list()
DISTS=list()
for categorical_column_name in categorical_column_names:
names=list()
nom_of_all=0
quantity=list()
grouped= X.groupby([categorical_column_name])
for category, group in grouped:
names.append(category)
quantity.append(len(group))
nom_of_all=nom_of_all+len(group)
distribution = [float(x) / nom_of_all for x in quantity]
NAMES.append(names)
DISTS.append(distribution)
return(NAMES, DISTS)
def categorical_imputer(X, categorical_column_names, data_type='train', names=None, distributions=None):
if data_type=='train':
[names, distributions]=categorical_distribution_finder(X, categorical_column_names)
for idx, categorical_column_name in enumerate(categorical_column_names):
for i in range(0, len(X)):
if pd.isnull(X[categorical_column_name].iloc[i]):
X[categorical_column_name].iloc[i]=np.random.choice(names[idx], p=distributions[idx])
if data_type=='train':
return [X, names, distributions]
elif data_type=='test':
return X
def numerical_imputer(X, training_mean=None):
if training_mean is None:
training_mean=X.mean()
imputed=X.fillna(training_mean)
return [imputed, training_mean]
else:
imputed=X.fillna(training_mean)
return imputed
#
# X_train and X_test are data-frames of MIMIC3 data with certain columns dropped
# - the numerical imputation is straightforward: any missing values are replaced
# with the mean value for that column
#
def train_test_imputer(X_train, X_test, categorical_column_names):
[X_TRAIN_CAT_IMPUTED, NAMES, DISTS]=categorical_imputer(X_train, categorical_column_names)
X_TEST_CAT_IMPUTED=categorical_imputer(X_test, categorical_column_names, 'test', NAMES, DISTS)
[X_TRAIN_IMPUTED, X_TRAIN_MEAN]=numerical_imputer(X_TRAIN_CAT_IMPUTED)
X_TEST_IMPUTED=numerical_imputer(X_TEST_CAT_IMPUTED, X_TRAIN_MEAN)
return [X_TRAIN_IMPUTED, X_TEST_IMPUTED]
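# Hedged toy sketch (not part of the original utilities): numerical gaps are
# filled with the training-set column means (also applied to the test set),
# while categorical gaps are drawn from the training category distribution.
def _example_train_test_imputer():
    X_train = pd.DataFrame({'age': [60.0, np.nan, 70.0], 'sex': ['M', 'F', None]})
    X_test = pd.DataFrame({'age': [np.nan], 'sex': [None]})
    return train_test_imputer(X_train, X_test, categorical_column_names=['sex'])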
def auc_calculator(model, X, y, num_of_folds):
auc_list=list()
skf=StratifiedKFold(n_splits=num_of_folds, shuffle=True, random_state=rnd.randint(1,1e6))
for train_index, test_index in skf.split(X,y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train, y_train)
predictions=model.predict_proba(X_test)[:,1]
try:
auc=roc_auc_score(y_true=y_test, y_score=predictions)
except ValueError:
print("Exception in roc_auc_score(): trying to ignore")
auc = 0
auc_list.append(auc)
return sum(auc_list)/len(auc_list)
def grid_search(X, y, num_of_folds, verbose, first_dim, second_dim=None, third_dim=None, return_auc_values=False):
best_auc=0
best_auc_setting=None
auc_matrix=np.zeros((len(first_dim),len(second_dim),len(third_dim)))
for max_depth_index, max_depth in enumerate(first_dim):
for n_estimator_index, n_estimator in enumerate(second_dim):
for learning_rate_index, learning_rate in enumerate(third_dim):
model=XGBClassifier(max_depth=int(max_depth), n_estimators=int(n_estimator), learning_rate=learning_rate)
auc=auc_calculator(model, X, y, num_of_folds)
auc_matrix[max_depth_index, n_estimator_index, learning_rate_index]=auc
if auc>best_auc:
best_auc=auc
best_auc_setting=[max_depth,n_estimator,learning_rate]
if verbose==True:
sys.stdout.write('\r GRID SEARCHING XGB: progress: {0:.3f} % ...'.format(
(max_depth_index*(len(second_dim)*len(third_dim))+
n_estimator_index*(len(third_dim))+
learning_rate_index
+1)/(len(first_dim)*len(second_dim)*len(third_dim))*100))
print ('\n')
if return_auc_values:
return [best_auc_setting,auc_matrix]
else:
return best_auc_setting
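# Hedged usage sketch (not from the original source); the search grid below is
# hypothetical:
#
#     best_setting = grid_search(X, y, num_of_folds=5, verbose=True,
#                                first_dim=[3, 5, 7],        # max_depth
#                                second_dim=[100, 300],      # n_estimators
#                                third_dim=[0.05, 0.1])      # learning_rate
#     # best_setting == [max_depth, n_estimator, learning_rate] with the highest CV AUC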
def vectors_to_csv(address, file_name, vector_one, label_one, vector_two=None, label_two=None,vector_three=None, label_three=None):
if vector_two is None:
df=pd.DataFrame(data={label_one:vector_one})
elif vector_three is None:
df=pd.DataFrame(data={label_one:vector_one, label_two:vector_two})
else:
df=pd.DataFrame(data={label_one:vector_one, label_two:vector_two, label_three:vector_three})
df.to_csv(address+'/'+file_name+'.csv')
def create_subfolder_if_not_existing(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def save_roc_curve(data_address, TPR, FPR, auc):
plt.figure()
plt.title('Receiver Operating Characteristic')
plt.plot(FPR, TPR, 'b', label = 'AUC = %0.2f' % auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
# plt.show()
plt.savefig(data_address)
plt.close()
def feature_importance_updator(accumulative_feature_importance, new_importance):
if accumulative_feature_importance is None:
return new_importance
else:
return accumulative_feature_importance+new_importance
def feature_importance_saver(address, col_names, accumulative_feature_importance, num_of_folds):
mean_feature_importances=accumulative_feature_importance/num_of_folds
DF=pd.DataFrame(data={'FEATURE': col_names, 'IMPORTANCE': mean_feature_importances})
DF.to_csv(address+'/'+'feature_importances.csv')
DF=DF.sort_values(by='IMPORTANCE', ascending=False).reset_index(drop=True)
DF.to_csv(address+'/'+'feature_importances_sorted.csv')
def first_matching_ICD9_finder(code, convertor_dict):
ones=range(0,10)
for one in ones:
try:
Matching_ICD9s_name=convertor_dict[10*code+one]
return Matching_ICD9s_name
except KeyError:
continue
return 'UNKNOWN'
def convert_ICD9_codes(features_list, conversion_tables_address):
ICD9Codes=pd.read_csv(conversion_tables_address+'/'+'D_ICD_PROCEDURES.csv.gz')
convertor_dict=dict(zip(ICD9Codes['ICD9_CODE'],ICD9Codes['LONG_TITLE']))
feature_names = ['ICD9_'+str(feature[5:])+'_'+ first_matching_ICD9_finder(int(feature[5:]), convertor_dict)
if feature.startswith('ICD9_')
else feature
for feature in features_list]
return feature_names
def convert_items_n_labitems(features_list, conversion_tables_address):
RE_INT = re.compile(r'^[-+]?([1-9]\d*|0)$')
df_D_ITEMS = pd.read_csv(conversion_tables_address+'/'+'D_ITEMS.csv.gz')
df_D_LABITEMS = pd.read_csv(conversion_tables_address+'/'+'D_LABITEMS.csv.gz')
df_items = pd.concat([df_D_ITEMS[['ITEMID','LABEL']], df_D_LABITEMS[['ITEMID','LABEL']]]).set_index('ITEMID')
feature_names = [df_items.loc[int(feature.split('_')[0])].LABEL+' ('+feature.split('_')[1] + ')'
if RE_INT.match(feature.split('_')[0])
else feature for feature in features_list ]
return feature_names
def convert_numbers_to_names(features_list, conversion_tables_address):
return convert_ICD9_codes(convert_items_n_labitems(features_list, conversion_tables_address), conversion_tables_address)
#
# Coarsens the ICD codes to a higher level
# by dropping the last code digit - but, it looks like there may be some
# issues with the original code as it treats the ICD codes as numbers rather
# than strings and so doesn't take into account the semantically meaningful
# leading and trailing zeros.
#
def ICD9_categorizer(X):
# Get a list of the ICD columns in input X
ICD9_COLUMN_NAMES=[col for col in X.columns if str(col).startswith('ICD9_')]
# Make a DataFrame of coarsened ICD-9 category columns (ICD9_0 .. ICD9_999), initialised to zero
ICD9_categorized=pd.DataFrame(index=range(0,len(X)), columns=['ICD9_'+str(x) for x in range(0,1000)]).fillna(0)
# For each ICD column name:
for ICD9_column_name in ICD9_COLUMN_NAMES:
# Discard the last digit in the code number by doing integer division by 10
index=int(int(ICD9_column_name[5:])/10)
FITTING_CATEGORY='ICD9_'+str(index)
ICD9_categorized[FITTING_CATEGORY]=ICD9_categorized[FITTING_CATEGORY]+X[ICD9_column_name]
X=X.drop(ICD9_COLUMN_NAMES, axis=1)
X=X.join(ICD9_categorized)
return X
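# Hedged toy sketch (not part of the original utilities): codes are coarsened
# by integer-dividing the numeric suffix by 10, so 'ICD9_3961' and 'ICD9_3965'
# are both summed into the coarser 'ICD9_396' column.
def _example_icd9_categorizer():
    X = pd.DataFrame({'ICD9_3961': [1, 0], 'ICD9_3965': [0, 1], 'age': [70, 80]})
    return ICD9_categorizer(X)  # the 'ICD9_396' column holds [1, 1]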
def save_fold_data(writing_dir, fold_number, icustay_id_train, X_TRAIN_NORMALIZED, y_train, icustay_id_test, X_TEST_NORMALIZED, y_test, convert_names, conversion_tables_address=None):
ICUSTAY_ID_TRAIN=pd.DataFrame(data={'ICUSTAY_ID': icustay_id_train})
Y_TRAIN=pd.DataFrame(data={'LABEL': y_train})
X_TRAIN_NORMALIZED=X_TRAIN_NORMALIZED.reset_index().drop(['index'],axis=1)
TRAINING=pd.concat([ICUSTAY_ID_TRAIN, X_TRAIN_NORMALIZED, Y_TRAIN], axis=1)
ICUSTAY_ID_TEST=pd.DataFrame(data={'ICUSTAY_ID': icustay_id_test})
Y_TEST=pd.DataFrame(data={'LABEL': y_test})
X_TEST_NORMALIZED=X_TEST_NORMALIZED.reset_index().drop(['index'],axis=1)
TESTING = pd.concat([ICUSTAY_ID_TEST, X_TEST_NORMALIZED, Y_TEST], axis=1)
from string import ascii_letters
import struct
from uuid import uuid4
from datashape import var, R, Option, dshape
import numpy as np
from odo import resource, odo
import pandas as pd
import pytest
import sqlalchemy as sa
from warp_prism._warp_prism import (
postgres_signature,
raw_to_arrays,
test_overflow_operations as _test_overflow_operations,
)
from warp_prism import (
to_arrays,
to_dataframe,
null_values as null_values_for_type,
_typeid_map,
)
from warp_prism.tests import tmp_db_uri as tmp_db_uri_ctx
@pytest.fixture(scope='module')
def tmp_db_uri():
with tmp_db_uri_ctx() as db_uri:
yield db_uri
@pytest.fixture
def tmp_table_uri(tmp_db_uri):
return '%s::%s%s' % (tmp_db_uri, 'table_', uuid4().hex)
def check_roundtrip_nonnull(table_uri, data, dtype, sqltype):
"""Check the data roundtrip through postgres using warp_prism to read the
data
Parameters
----------
table_uri : str
The uri to a unique table.
data : np.array
The input data.
dtype : str
The dtype of the data.
sqltype : type
The sqlalchemy type of the data.
"""
input_dataframe = pd.DataFrame({'a': data})
table = odo(input_dataframe, table_uri, dshape=var * R['a': dtype])
# Ensure that odo created the table correctly. If these fail the other
# tests are not well defined.
assert table.columns.keys() == ['a']
assert isinstance(table.columns['a'].type, sqltype)
arrays = to_arrays(table)
assert len(arrays) == 1
array, mask = arrays['a']
assert (array == data).all()
assert mask.all()
output_dataframe = to_dataframe(table)
pd.util.testing.assert_frame_equal(output_dataframe, input_dataframe)
@pytest.mark.parametrize('dtype,sqltype,start,stop,step', (
('int16', sa.SmallInteger, 0, 5000, 1),
('int32', sa.Integer, 0, 5000, 1),
('int64', sa.BigInteger, 0, 5000, 1),
('float32', sa.REAL, 0, 2500, 0.5),
('float64', sa.FLOAT, 0, 2500, 0.5),
))
def test_numeric_type_nonnull(tmp_table_uri,
dtype,
sqltype,
start,
stop,
step):
data = np.arange(start, stop, step, dtype=dtype)
check_roundtrip_nonnull(tmp_table_uri, data, dtype, sqltype)
def test_bool_type_nonnull(tmp_table_uri):
data = np.array([True] * 2500 + [False] * 2500, dtype=bool)
check_roundtrip_nonnull(tmp_table_uri, data, 'bool', sa.Boolean)
def test_string_type_nonnull(tmp_table_uri):
data = np.array(list(ascii_letters) * 200, dtype='object')
check_roundtrip_nonnull(tmp_table_uri, data, 'string', sa.String)
def test_datetime_type_nonnull(tmp_table_uri):
data = pd.date_range(
'2000',
'2016',
).values.astype('datetime64[us]')
check_roundtrip_nonnull(tmp_table_uri, data, 'datetime', sa.DateTime)
def test_date_type_nonnull(tmp_table_uri):
data = pd.date_range(
'2000',
'2016',
).values.astype('datetime64[D]')
check_roundtrip_nonnull(tmp_table_uri, data, 'date', sa.Date)
def check_roundtrip_null_values(table_uri,
data,
dtype,
sqltype,
null_values,
mask,
*,
astype=False):
"""Check the data roundtrip through postgres using warp_prism to read the
data
Parameters
----------
table_uri : str
The uri to a unique table.
data : iterable[any]
The input data.
dtype : str
The dtype of the data.
sqltype : type
The sqlalchemy type of the data.
null_values : dict[str, any]
The values to coerce ``NULL`` to, keyed by column name.
mask : np.ndarray[bool]
The expected validity mask (True where the value is not ``NULL``).
astype : bool, optional
Coerce the input data to the given dtype before making assertions about
the output data.
"""
table = resource(table_uri, dshape=var * R['a': Option(dtype)])
# Ensure that odo created the table correctly. If these fail the other
# tests are not well defined.
assert table.columns.keys() == ['a']
assert isinstance(table.columns['a'].type, sqltype)
table.insert().values([{'a': v} for v in data]).execute()
arrays = to_arrays(table)
assert len(arrays) == 1
array, actual_mask = arrays['a']
assert (actual_mask == mask).all()
assert (array[mask] == data[mask]).all()
output_dataframe = to_dataframe(table, null_values=null_values)
if astype:
data = data.astype(dshape(dtype).measure.to_numpy_dtype())
expected_dataframe = pd.DataFrame({'a': data})
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.spatial.distance import cdist
class KMeansModel:
def __init__(self, X, k=8, rs=1):
"""
Wrapper for the sklearn KMeans algorithm that extracts relevant information. Provides functionality to optimize
and build the model.
:param X: (pd.DataFrame) The (processed) input data for the model. Needs to be provided as a pandas.DataFrame.
Ideally, the index of the DataFrame refers to the identifiers within the data set.
:param k: (int) Cluster number to be used for the kmeans algorithm. Default: 8.
:param rs: (int) Random state to be used for the model. Default: 1
"""
self.X = X
self.k = k
self.rs = rs
# Storage for model
self.model = None
# Storage for results
self.clusters = pd.DataFrame()
import pandas as pd
import os
from data.loaders.django_import import DjangoImport
from api.models import Policy, Program
import boto3
class PolicyImport(DjangoImport):
django_model = Policy
def process_frame(self):
self.data = pd.read_excel(self.file_loc, sheet_name='Policies')
def get_queryset(self):
return self.django_model.objects.all()
def generate_json(self):
for ix, row in self.data.iterrows():
if pd.isnull(row['ID']):
continue
body = {
'policy_id': row['ID'],
'policy_type': row['Policy_Type'],
'description': row['Description'],
'category': row['Category'],
'link1': row['Link 1'] if pd.notnull(row['Link 1']) else None,
'link1_name': row['Link 1 Name'] if pd.notnull(row['Link 1 Name']) else None,
'link2': row['Link 2'] if pd.notnull(row['Link 2']) else None,
'link2_name': row['Link 2 Name'] if pd.notnull(row['Link 2 Name']) else None,
'link3': row['Link 3'] if pd.notnull(row['Link 3']) else None,
'link3_name': row['Link 3 Name'] if pd.notnull(row['Link 3 Name']) else None,
}
yield body
class ProgramImport(DjangoImport):
django_model = Program
def process_frame(self):
self.data = pd.read_excel(self.file_loc, sheet_name='Program Inventory')
self.data = self.data[pd.notnull(self.data['Policy_ID'])]
"""Functions for pulling data primarily from the EIA's Form 860."""
import logging
import pandas as pd
import sqlalchemy as sa
import pudl
from pudl.metadata.fields import apply_pudl_dtypes
logger = logging.getLogger(__name__)
def utilities_eia860(pudl_engine, start_date=None, end_date=None):
"""Pull all fields from the EIA860 Utilities table.
Args:
pudl_engine (sqlalchemy.engine.Engine): SQLAlchemy connection engine
for the PUDL DB.
start_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
end_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
Returns:
pandas.DataFrame: A DataFrame containing all the fields of the EIA 860
Utilities table.
"""
pt = pudl.output.pudltabl.get_table_meta(pudl_engine)
# grab the entity table
utils_eia_tbl = pt["utilities_entity_eia"]
utils_eia_select = sa.sql.select(utils_eia_tbl)
utils_eia_df = pd.read_sql(utils_eia_select, pudl_engine)
# grab the annual eia entity table
utils_eia860_tbl = pt["utilities_eia860"]
utils_eia860_select = sa.sql.select(utils_eia860_tbl)
if start_date is not None:
start_date = pd.to_datetime(start_date)
utils_eia860_select = utils_eia860_select.where(
utils_eia860_tbl.c.report_date >= start_date
)
if end_date is not None:
end_date = pd.to_datetime(end_date)
utils_eia860_select = utils_eia860_select.where(
utils_eia860_tbl.c.report_date <= end_date
)
utils_eia860_df = pd.read_sql(utils_eia860_select, pudl_engine)
# grab the glue table for the utility_id_pudl
utils_g_eia_tbl = pt["utilities_eia"]
utils_g_eia_select = sa.sql.select(
utils_g_eia_tbl.c.utility_id_eia,
utils_g_eia_tbl.c.utility_id_pudl,
)
utils_g_eia_df = pd.read_sql(utils_g_eia_select, pudl_engine)
out_df = pd.merge(utils_eia_df, utils_eia860_df, how="left", on=["utility_id_eia"])
out_df = pd.merge(out_df, utils_g_eia_df, how="left", on=["utility_id_eia"])
out_df = (
out_df.assign(report_date=lambda x: pd.to_datetime(x.report_date))
.dropna(subset=["report_date", "utility_id_eia"])
.pipe(apply_pudl_dtypes, group="eia")
)
first_cols = [
"report_date",
"utility_id_eia",
"utility_id_pudl",
"utility_name_eia",
]
out_df = pudl.helpers.organize_cols(out_df, first_cols)
return out_df
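# Hedged usage sketch (not from the original source); the database path below
# is hypothetical:
#
#     engine = sa.create_engine("sqlite:////path/to/pudl.sqlite")
#     utils_2020 = utilities_eia860(engine, start_date="2020-01-01",
#                                   end_date="2020-12-31")
#     utils_2020[["report_date", "utility_id_eia", "utility_name_eia",
#                 "utility_id_pudl"]].head()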
def plants_eia860(pudl_engine, start_date=None, end_date=None):
"""Pull all fields from the EIA Plants tables.
Args:
pudl_engine (sqlalchemy.engine.Engine): SQLAlchemy connection engine
for the PUDL DB.
start_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
end_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
Returns:
pandas.DataFrame: A DataFrame containing all the fields of the EIA 860
Plants table.
"""
pt = pudl.output.pudltabl.get_table_meta(pudl_engine)
# grab the entity table
plants_eia_tbl = pt["plants_entity_eia"]
plants_eia_select = sa.sql.select(plants_eia_tbl)
plants_eia_df = pd.read_sql(plants_eia_select, pudl_engine)
# grab the annual table select
plants_eia860_tbl = pt["plants_eia860"]
plants_eia860_select = sa.sql.select(plants_eia860_tbl)
if start_date is not None:
start_date = pd.to_datetime(start_date)
plants_eia860_select = plants_eia860_select.where(
plants_eia860_tbl.c.report_date >= start_date
)
if end_date is not None:
end_date = pd.to_datetime(end_date)
plants_eia860_select = plants_eia860_select.where(
plants_eia860_tbl.c.report_date <= end_date
)
plants_eia860_df = pd.read_sql(plants_eia860_select, pudl_engine).assign(
report_date=lambda x: pd.to_datetime(x.report_date)
)
# plant glue table
plants_g_eia_tbl = pt["plants_eia"]
plants_g_eia_select = sa.sql.select(
plants_g_eia_tbl.c.plant_id_eia,
plants_g_eia_tbl.c.plant_id_pudl,
)
plants_g_eia_df = pd.read_sql(plants_g_eia_select, pudl_engine)
out_df = pd.merge(plants_eia_df, plants_eia860_df, how="left", on=["plant_id_eia"])
out_df = pd.merge(out_df, plants_g_eia_df, how="left", on=["plant_id_eia"])
utils_eia_tbl = pt["utilities_eia"]
utils_eia_select = sa.sql.select(utils_eia_tbl)
utils_eia_df = pd.read_sql(utils_eia_select, pudl_engine)
out_df = (
pd.merge(out_df, utils_eia_df, how="left", on=["utility_id_eia"])
.dropna(subset=["report_date", "plant_id_eia"])
.pipe(apply_pudl_dtypes, group="eia")
)
return out_df
def plants_utils_eia860(pudl_engine, start_date=None, end_date=None):
"""Create a dataframe of plant and utility IDs and names from EIA 860.
Returns a pandas dataframe with the following columns:
- report_date (in which data was reported)
- plant_name_eia (from EIA entity)
- plant_id_eia (from EIA entity)
- plant_id_pudl
- utility_id_eia (from EIA860)
- utility_name_eia (from EIA860)
- utility_id_pudl
Args:
pudl_engine (sqlalchemy.engine.Engine): SQLAlchemy connection engine
for the PUDL DB.
start_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
end_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
Returns:
pandas.DataFrame: A DataFrame containing plant and utility IDs and
names from EIA 860.
"""
# Contains the one-to-one mapping of EIA plants to their operators
plants_eia = (
plants_eia860(pudl_engine, start_date=start_date, end_date=end_date)
.drop(
[
"utility_id_pudl",
"city",
"state", # Avoid dupes in merge
"zip_code",
"street_address",
"utility_name_eia",
],
axis="columns",
)
.dropna(subset=["utility_id_eia"]) # Drop unmergable records
)
utils_eia = utilities_eia860(pudl_engine, start_date=start_date, end_date=end_date)
# to avoid duplicate columns on the merge...
out_df = pd.merge(
plants_eia, utils_eia, how="left", on=["report_date", "utility_id_eia"]
)
out_df = (
out_df.loc[
:,
[
"report_date",
"plant_id_eia",
"plant_name_eia",
"plant_id_pudl",
"utility_id_eia",
"utility_name_eia",
"utility_id_pudl",
],
]
.dropna(subset=["report_date", "plant_id_eia", "utility_id_eia"])
.pipe(apply_pudl_dtypes, group="eia")
)
return out_df
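# Hedged usage sketch (not from the original source), reusing the hypothetical
# engine from the utilities_eia860 sketch above:
#
#     ids = plants_utils_eia860(engine, start_date="2019-01-01",
#                               end_date="2019-12-31")
#     # one row per plant / utility / report_date, with both EIA and PUDL ids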
def generators_eia860(
pudl_engine: sa.engine.Engine,
start_date=None,
end_date=None,
unit_ids: bool = False,
fill_tech_desc: bool = True,
) -> pd.DataFrame:
"""Pull all fields reported in the generators_eia860 table.
Merge in other useful fields including the latitude & longitude of the
plant that the generators are part of, canonical plant & operator names and
the PUDL IDs of the plant and operator, for merging with other PUDL data
sources.
Fill in data for adjacent years if requested, but never fill in earlier
than the earliest working year of data for EIA923, and never add more than
one year on after the reported data (since there should at most be a one
year lag between EIA923 and EIA860 reporting)
This also fills the ``technology_description`` field according to matching
``energy_source_code_1`` values. It will only do so if the ``energy_source_code_1``
is consistent throughout years for a given plant.
Args:
pudl_engine: SQLAlchemy connection engine for the PUDL DB.
start_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
end_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
unit_ids: If True, use several heuristics to assign
individual generators to functional units. EXPERIMENTAL.
fill_tech_desc: If True, backfill the technology_description
field to years earlier than 2013 based on plant and
energy_source_code_1 and fill in technologies with only one matching code.
Returns:
A DataFrame containing all the fields of the EIA 860 Generators table.
"""
pt = pudl.output.pudltabl.get_table_meta(pudl_engine)
# Almost all the info we need will come from here.
gens_eia860_tbl = pt["generators_eia860"]
gens_eia860_select = sa.sql.select(gens_eia860_tbl)
# To get plant age
generators_entity_eia_tbl = pt["generators_entity_eia"]
generators_entity_eia_select = sa.sql.select(generators_entity_eia_tbl)
# To get the Lat/Lon coordinates
plants_entity_eia_tbl = pt["plants_entity_eia"]
plants_entity_eia_select = sa.sql.select(plants_entity_eia_tbl)
if start_date is not None:
start_date = pd.to_datetime(start_date)
gens_eia860_select = gens_eia860_select.where(
gens_eia860_tbl.c.report_date >= start_date
)
if end_date is not None:
end_date = pd.to_datetime(end_date)
gens_eia860_select = gens_eia860_select.where(
gens_eia860_tbl.c.report_date <= end_date
)
gens_eia860 = pd.read_sql(gens_eia860_select, pudl_engine)
generators_entity_eia_df = pd.read_sql(generators_entity_eia_select, pudl_engine)
plants_entity_eia_df = pd.read_sql(plants_entity_eia_select, pudl_engine)
from telethon import functions, types
from scipy.optimize import minimize
from user import User
import numpy as np
import pandas as pd
import math
import time
import os
fL = open(os.getenv('SAVE_PATH') + "locations-"+str(time.time())+".csv", "w")
fL.write("user,lat,lon\n")
def loadLocated():
df = pd.read_csv(os.getenv('FILE_PATH'), index_col=[0])
return list(df.index)
located = loadLocated()
dictionary = {}
prev = {}
cleanUpTime = int(os.getenv('CLEANUP_TIME'))
latK = 110574
startLat = float(os.getenv('BASE_LAT'))
startLon = float(os.getenv('BASE_LON'))
lat = startLat
lon = startLon
def saveAndMerge(dstFile):
fL.flush()
os.fsync(fL.fileno())
dst = pd.read_csv(dstFile)
src = pd.read_csv(fL.name)
fdf = pd.concat([dst, src])
import pandas as pd
import ipywidgets as ipw
import ipysheet as ips
from cmdty_storage import CmdtyStorage, three_factor_seasonal_value, MultiFactorModel, multi_factor, RatchetInterp
from curves import max_smooth_interp, adjustments
from datetime import date, timedelta
from IPython.display import display
from ipywidgets.widgets.interaction import show_inline_matplotlib_plots
from collections import namedtuple
import itertools
import logging
import csv
import os
from PyQt5.QtWidgets import QFileDialog, QApplication
from datetime import datetime
# Shared variables
freq = 'D'
num_fwd_rows = 28
date_format = 'YYYY-MM-DD'
num_ratch_rows = 20
RatchetRow = namedtuple('RatchetRow', ['date', 'inventory', 'inject_rate', 'withdraw_rate'])
def str_to_bool(bool_text: str) -> bool:
bool_text_lower = bool_text.lower()
if bool_text_lower == 'true':
return True
elif bool_text_lower == 'false':
return False
else:
raise ValueError('bool_text parameter value of \'{}\' cannot be parsed to boolean.'.format(bool_text))
def select_file_open(header, filter):
dir = './'
app = QApplication([dir])
file_name = QFileDialog.getOpenFileName(None, header, dir, filter=filter)
return file_name[0]
def select_file_save(header, filter, default_file_name):
dir = './'
app = QApplication([dir])
default_file_path = os.path.join(dir, default_file_name)
file_name = QFileDialog.getSaveFileName(None, header, default_file_path, filter=filter)
return file_name[0]
def save_dict_to_csv(file_path, data_dict):
with open(file_path, mode='w', newline='') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['key', 'value'])
for key, value in data_dict.items():
csv_writer.writerow([key, value])
def load_csv_to_dict(file_path) -> dict:
data_dict = {}
with open(file_path, mode='r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
header_text = ','.join(row)
if header_text != 'key,value':
raise ValueError('Storage details header row must be \'key,value\' but is \'' + header_text + '\'.')
else:
data_dict[row[0]] = row[1]
line_count += 1
return data_dict
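# Illustrative layout of the two-column CSV that save_dict_to_csv writes and
# load_csv_to_dict expects (the values here are made up):
#   key,value
#   storage_start,2024-04-01
#   injection_cost,0.1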
def dataframe_to_ipysheet(dataframe):
columns = dataframe.columns.tolist()
rows = dataframe.index.tolist()
cells = []
cells.append(ips.Cell(
value=[p.strftime('%Y-%m-%d') for p in dataframe.index],
row_start=0,
row_end=len(rows) - 1,
column_start=0,
column_end=0,
type='date',
date_format='YYYY-MM-DD',
squeeze_row=False,
squeeze_column=True
))
idx = 1
for c in columns:
cells.append(ips.Cell(
value=dataframe[c].values,
row_start=0,
row_end=len(rows) - 1,
column_start=idx,
column_end=idx,
type='numeric',
numeric_format='0.00',
squeeze_row=False,
squeeze_column=True
))
idx += 1
return ips.Sheet(
rows=len(rows),
columns=len(columns) + 1,
cells=cells,
row_headers=False,
column_headers=['period'] + [str(header) for header in columns])
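# dataframe_to_ipysheet above renders a PeriodIndex DataFrame as an ipysheet grid: column 0 holds the
# periods formatted as 'YYYY-MM-DD' strings and each remaining column is one numeric column of the frame.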
# Set up logging
class OutputWidgetHandler(logging.Handler):
""" Custom logging handler sending logs to an output widget """
def __init__(self, *args, **kwargs):
super(OutputWidgetHandler, self).__init__(*args, **kwargs)
layout = {
'width': '50%',
'height': '160px',
'border': '1px solid black',
'overflow_y': 'auto',
}
self.out = ipw.Output(layout=layout)
def emit(self, record):
""" Overload of logging.Handler method """
formatted_record = self.format(record)
new_output = {
'name': 'stdout',
'output_type': 'stream',
'text': formatted_record + '\n'
}
self.out.outputs = (new_output,) + self.out.outputs
def clear_logs(self):
""" Clear the current logs """
self.out.clear_output()
logger = logging.getLogger('storage_gui')
log_handler = OutputWidgetHandler()
log_handler.setFormatter(logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s'))
logger.addHandler(log_handler)
logger.setLevel(logging.INFO)
log_level_wgt = ipw.Dropdown(description='Log Level',
options=['Debug', 'Info', 'Warning', 'Error', 'Critical'],
value='Info')
multi_factor.logger.addHandler(log_handler)
multi_factor.logger.setLevel(logging.INFO)
def on_log_level_change(change):
try:
level_text = change['new']
level_int = getattr(logging, level_text.upper())
logger.setLevel(level_int)
multi_factor.logger.setLevel(level_int)
except Exception as e:
logger.exception(e)
log_level_wgt.observe(on_log_level_change, names='value')
def on_clear_logs_clicked(b):
try:
log_handler.clear_logs()
except Exception as e:
logger.exception(e)
btn_clear_logs = ipw.Button(description='Clear Log Display')
btn_clear_logs.on_click(on_clear_logs_clicked)
def create_tab(titles, children):
tab = ipw.Tab()
for idx, title in enumerate(titles):
tab.set_title(idx, title)
tab.children = children
return tab
def enumerate_ratchets():
ratchet_row = 0
while ratchet_row < num_ratch_rows and ratchet_input_sheet.cells[1].value[ratchet_row] != '':
yield RatchetRow(ratchet_input_sheet.cells[0].value[ratchet_row], ratchet_input_sheet.cells[1].value[ratchet_row],
ratchet_input_sheet.cells[2].value[ratchet_row], ratchet_input_sheet.cells[3].value[ratchet_row])
ratchet_row += 1
def read_ratchets():
ratchets = []
for ratchet in enumerate_ratchets():
if ratchet.date != '':
dt_item = (pd.Period(ratchet.date, freq=freq), [(ratchet.inventory, -ratchet.inject_rate,
ratchet.withdraw_rate)])
ratchets.append(dt_item)
else:
dt_item[1].append((ratchet.inventory, -ratchet.inject_rate,
ratchet.withdraw_rate))
return ratchets
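# read_ratchets returns the nested structure consumed by the CmdtyStorage ratchets argument below,
# shaped like this illustrative (made-up) example:
#   [(Period('2024-04-01', 'D'), [(0.0, -255.0, 0.0), (2000.0, -175.0, 245.0)]),
#    (Period('2024-10-01', 'D'), [(0.0, -130.0, 0.0)])]
# i.e. one entry per dated row, holding (inventory, -inject_rate, withdraw_rate) triples for that date.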
# ======================================================================================================
# VALUATION DATA
def on_load_val_data_clicked(b):
try:
val_data_path = select_file_open('Select valuation data file', 'CSV File (*.csv)')
if val_data_path != '':
val_data_dict = load_csv_to_dict(val_data_path)
val_date_wgt.value = datetime.strptime(val_data_dict['val_date'], '%Y-%m-%d').date()
inventory_wgt.value = val_data_dict['inventory']
ir_wgt.value = val_data_dict['interest_rate']
discount_deltas_wgt.value = str_to_bool(val_data_dict['discount_deltas'])
except Exception as e:
logger.exception(e)
def on_save_val_data_clicked(b):
try:
val_data_path = select_file_save('Save valuation data to', 'CSV File (*.csv)', 'val_data.csv')
if val_data_path != '':
val_data_dict = val_data_to_dict()
save_dict_to_csv(val_data_path, val_data_dict)
except Exception as e:
logger.exception(e)
btn_load_val_data_wgt = ipw.Button(description='Load Valuation Data')
btn_load_val_data_wgt.on_click(on_load_val_data_clicked)
btn_save_val_data_wgt = ipw.Button(description='Save Valuation Data')
btn_save_val_data_wgt.on_click(on_save_val_data_clicked)
val_data_buttons = ipw.HBox([btn_load_val_data_wgt, btn_save_val_data_wgt])
val_date_wgt = ipw.DatePicker(description='Val Date', value=date.today())
inventory_wgt = ipw.FloatText(description='Inventory')
ir_wgt = ipw.FloatText(description='Intrst Rate %', step=0.005)
discount_deltas_wgt = ipw.Checkbox(description='Discount Deltas', value=False)
val_inputs_wgt = ipw.VBox([val_data_buttons, val_date_wgt, inventory_wgt, ir_wgt, discount_deltas_wgt])
def val_data_to_dict() -> dict:
val_data_dict = {'val_date': val_date_wgt.value,
'inventory': inventory_wgt.value,
'interest_rate': ir_wgt.value,
'discount_deltas': discount_deltas_wgt.value}
return val_data_dict
# ======================================================================================================
# FORWARD CURVE
def create_fwd_input_sheet(dates, prices, num_rows):
if len(dates) > num_rows:
raise ValueError('Length of dates cannot exceed number of rows {}.'.format(num_rows))
if len(prices) > num_rows:
raise ValueError('Length of prices cannot exceed number of rows {}.'.format(num_rows))
dates = dates + [None] * (num_rows - len(dates))
prices = prices + [None] * (num_rows - len(prices))
dates_cells = ips.Cell(value=dates, row_start=0, row_end=len(dates) - 1, column_start=0,
column_end=0, type='date', date_format=date_format, squeeze_row=False, squeeze_column=True)
prices_cells = ips.Cell(value=prices, row_start=0, row_end=len(prices) - 1, column_start=1,
column_end=1, type='numeric', numeric_format='0.000', squeeze_row=False,
squeeze_column=True)
cells = [dates_cells, prices_cells]
return ips.Sheet(rows=len(dates), columns=2, cells=cells, row_headers=False,
column_headers=['fwd_start', 'price'])
def reset_fwd_input_sheet(new_fwd_input_sheet):
# This code is very bad and brittle, but a necessary hack to be able to update the fwd input sheet quickly
tuple_with_fwd_input = fwd_data_wgt.children[0].children
fwd_data_wgt.children[0].children = tuple_with_fwd_input[0:5] + (new_fwd_input_sheet,)
global fwd_input_sheet
fwd_input_sheet = new_fwd_input_sheet
def on_load_curve_params(b):
try:
curve_params_path = select_file_open('Select curve parameters file', 'CSV File (*.csv)')
if curve_params_path != '':
curve_params_dict = load_csv_to_dict(curve_params_path)
smooth_curve_wgt.value = str_to_bool(curve_params_dict['smooth_curve'])
apply_wkend_shaping_wgt.value = str_to_bool(curve_params_dict['apply_weekend_shaping'])
wkend_factor_wgt.value = curve_params_dict['weekend_shaping_factor']
except Exception as e:
logger.exception(e)
def on_save_curve_params(b):
try:
curve_params_path = select_file_save('Save curve params to', 'CSV File (*.csv)', 'curve_params.csv')
if curve_params_path != '':
curve_params_dict = {'smooth_curve': smooth_curve_wgt.value,
'apply_weekend_shaping': apply_wkend_shaping_wgt.value,
'weekend_shaping_factor': wkend_factor_wgt.value}
save_dict_to_csv(curve_params_path, curve_params_dict)
except Exception as e:
logger.exception(e)
btn_load_curve_params_wgt = ipw.Button(description='Load Curve Params')
btn_load_curve_params_wgt.on_click(on_load_curve_params)
btn_save_curve_params_wgt = ipw.Button(description='Save Curve Params')
btn_save_curve_params_wgt.on_click(on_save_curve_params)
curve_params_buttons = ipw.HBox([btn_load_curve_params_wgt, btn_save_curve_params_wgt])
fwd_input_sheet = create_fwd_input_sheet([''] * num_fwd_rows, [''] * num_fwd_rows, num_fwd_rows)
out_fwd_curve = ipw.Output()
smooth_curve_wgt = ipw.Checkbox(description='Apply Smoothing', value=False)
apply_wkend_shaping_wgt = ipw.Checkbox(description='Wkend Shaping', value=False, disabled=True)
wkend_factor_wgt = ipw.FloatText(description='Wkend shaping factor', step=0.005, disabled=True)
btn_plot_fwd_wgt = ipw.Button(description='Plot Forward Curve')
btn_export_daily_fwd_wgt = ipw.Button(description='Export Daily Curve')
btn_import_fwd_wgt = ipw.Button(description='Import Forward Curve')
btn_export_fwd_wgt = ipw.Button(description='Export Forward Curve')
btn_clear_fwd_wgt = ipw.Button(description='Clear Forward Curve')
def on_smooth_curve_change(change):
apply_wkend_shaping_wgt.disabled = not change['new']
smooth_curve_wgt.observe(on_smooth_curve_change, names='value')
def on_apply_wkend_shaping_change(change):
wkend_factor_wgt.disabled = not change['new']
apply_wkend_shaping_wgt.observe(on_apply_wkend_shaping_change, names='value')
def on_plot_fwd_clicked(b):
try:
out_fwd_curve.clear_output()
curve = read_fwd_curve()
with out_fwd_curve:
curve.plot()
show_inline_matplotlib_plots()
except Exception as e:
logger.exception(e)
def on_export_daily_fwd_clicked(b):
try:
fwd_curve_path = select_file_save('Save daily forward curve to', 'CSV File (*.csv)', 'daily_fwd_curve.csv')
if fwd_curve_path != '':
curve = read_fwd_curve()
curve.to_csv(fwd_curve_path, index_label='date', header=['price'])
except Exception as e:
logger.exception(e)
btn_plot_fwd_wgt.on_click(on_plot_fwd_clicked)
btn_export_daily_fwd_wgt.on_click(on_export_daily_fwd_clicked)
def on_import_fwd_curve_clicked(b):
try:
fwd_curve_path = select_file_open('Select forward curve file', 'CSV File (*.csv)')
if fwd_curve_path != '':
fwd_dates = []
fwd_prices = []
with open(fwd_curve_path, mode='r') as fwd_csv_file:
csv_reader = csv.DictReader(fwd_csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
header_text = ','.join(row)
if header_text != 'contract_start,price':
raise ValueError('Forward curve header row must be \'contract_start,price\'.')
fwd_dates.append(row['contract_start'])
fwd_prices.append(float(row['price']))
line_count += 1
imported_fwd_input_sheet = create_fwd_input_sheet(fwd_dates, fwd_prices, num_fwd_rows)
reset_fwd_input_sheet(imported_fwd_input_sheet)
except Exception as e:
logger.exception(e)
def on_export_fwd_curve_clicked(b):
try:
fwd_curve_path = select_file_save('Save forward curve to', 'CSV File (*.csv)', 'fwd_curve_data.csv')
if fwd_curve_path != '':
rows = []
fwd_row = 0
for fwd_start, fwd_price in enumerate_fwd_points():
row = {'contract_start': fwd_start,
'price': fwd_price}
rows.append(row)
fwd_row += 1
with open(fwd_curve_path, mode='w', newline='') as fwd_csv_file:
writer = csv.DictWriter(fwd_csv_file, fieldnames=['contract_start', 'price'])
writer.writeheader()
writer.writerows(rows)
except Exception as e:
logger.exception(e)
def on_clear_fwd_curve_clicked(b):
try:
new_fwd_input_sheet = create_fwd_input_sheet([], [], num_fwd_rows)
reset_fwd_input_sheet(new_fwd_input_sheet)
except Exception as e:
logger.exception(e)
btn_import_fwd_wgt.on_click(on_import_fwd_curve_clicked)
btn_export_fwd_wgt.on_click(on_export_fwd_curve_clicked)
btn_clear_fwd_wgt.on_click(on_clear_fwd_curve_clicked)
fwd_data_wgt = ipw.HBox([ipw.VBox([curve_params_buttons, smooth_curve_wgt, apply_wkend_shaping_wgt, wkend_factor_wgt,
ipw.HBox([ipw.VBox([btn_import_fwd_wgt, btn_clear_fwd_wgt]), btn_export_fwd_wgt]),
fwd_input_sheet]),
ipw.VBox([btn_plot_fwd_wgt, btn_export_daily_fwd_wgt, out_fwd_curve])])
# ======================================================================================================
# STORAGE DETAILS
def create_numeric_col(values, col_num):
return ips.Cell(value=values, row_start=0, row_end=len(values) - 1, column_start=col_num,
column_end=col_num, type='numeric', numeric_format='0.000', squeeze_row=False,
squeeze_column=True)
def create_ratchets_sheet(dates, inventories, inject_rates, withdraw_rates, num_rows):
if len(inventories) > num_rows:
raise ValueError('Length of inventories in ratchets cannot exceed number of rows {}.'.format(num_rows))
dates = dates + [''] * (num_rows - len(dates))
inventories = inventories + [''] * (num_rows - len(inventories))
inject_rates = inject_rates + [''] * (num_rows - len(inject_rates))
withdraw_rates = withdraw_rates + [''] * (num_rows - len(withdraw_rates))
dates_cells = ips.Cell(value=dates, row_start=0, row_end=len(dates) - 1, column_start=0,
column_end=0, type='date', date_format=date_format, squeeze_row=False, squeeze_column=True)
inventory_cells = create_numeric_col(inventories, 1)
inject_rate_cells = create_numeric_col(inject_rates, 2)
withdraw_rate_cells = create_numeric_col(withdraw_rates, 3)
cells = [dates_cells, inventory_cells, inject_rate_cells, withdraw_rate_cells]
return ips.Sheet(rows=len(dates), columns=4, cells=cells, row_headers=False,
column_headers=['date', 'inventory', 'inject_rate', 'withdraw_rate'])
def on_save_storage_details_clicked(b):
try:
save_path = select_file_save('Save storage details to', 'CSV File (*.csv)', 'storage_details.csv')
if save_path != '':
with open(save_path, mode='w', newline='') as storage_details_file:
details_writer = csv.writer(storage_details_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
details_writer.writerow(['key', 'value'])
details_writer.writerow(['storage_start', start_wgt.value])
details_writer.writerow(['storage_end', end_wgt.value])
details_writer.writerow(['injection_cost', inj_cost_wgt.value])
details_writer.writerow(['withdrawal_cost', with_cost_wgt.value])
details_writer.writerow(['cmdty_consumed_inject', inj_consumed_wgt.value])
details_writer.writerow(['cmdty_consumed_withdraw', with_consumed_wgt.value])
storage_type = stor_type_wgt.value.lower()
details_writer.writerow(['storage_type', storage_type])
if storage_type == 'simple':
details_writer.writerow(['min_inventory', invent_min_wgt.value])
details_writer.writerow(['max_inventory', invent_max_wgt.value])
details_writer.writerow(['max_injection_rate', inj_rate_wgt.value])
details_writer.writerow(['max_withdrawal_rate', with_rate_wgt.value])
if storage_type == 'ratchets':
ratchets_save_path = select_file_save('Save storage ratchets to', 'CSV File (*.csv)',
'storage_ratchets.csv')
if ratchets_save_path != '':
with open(ratchets_save_path, mode='w', newline='') as storage_ratchets_file:
ratchets_writer = csv.writer(storage_ratchets_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
ratchets_writer.writerow(['date', 'inventory', 'inject_rate', 'withdraw_rate'])
for ratchet in enumerate_ratchets():
ratchets_writer.writerow(
[ratchet.date, ratchet.inventory, ratchet.inject_rate, ratchet.withdraw_rate])
except Exception as e:
logger.exception(e)
def on_load_storage_details_clicked(b):
try:
load_path = select_file_open('Open storage details from', 'CSV File (*.csv)')
if load_path != '':
details_dict = load_csv_to_dict(load_path)
start_wgt.value = datetime.strptime(details_dict['storage_start'], '%Y-%m-%d').date()
end_wgt.value = datetime.strptime(details_dict['storage_end'], '%Y-%m-%d').date()
inj_cost_wgt.value = details_dict['injection_cost']
with_cost_wgt.value = details_dict['withdrawal_cost']
inj_consumed_wgt.value = details_dict['cmdty_consumed_inject']
with_consumed_wgt.value = details_dict['cmdty_consumed_withdraw']
storage_type = details_dict['storage_type']
if storage_type == 'simple':
stor_type_wgt.value = 'Simple'
invent_min_wgt.value = details_dict['min_inventory']
invent_max_wgt.value = details_dict['max_inventory']
inj_rate_wgt.value = details_dict['max_injection_rate']
with_rate_wgt.value = details_dict['max_withdrawal_rate']
if storage_type == 'ratchets':
ratchets_load_path = select_file_open('Open storage ratchets from', 'CSV File (*.csv)')
if ratchets_load_path != '':
dates = []
inventories = []
inject_rates = []
withdraw_rates = []
with open(ratchets_load_path, mode='r') as ratchets_file:
csv_reader = csv.reader(ratchets_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
header_text = ','.join(row)
if header_text != 'date,inventory,inject_rate,withdraw_rate':
raise ValueError(
'Storage details header row must be \'date,inventory,inject_rate,withdraw_rate\' but is \'' + header_text + '\'.')
else:
dates.append(row[0])
inventories.append(float(row[1]))
inject_rates.append(float(row[2]))
withdraw_rates.append(float(row[3]))
line_count += 1
new_ratchets_sheet = create_ratchets_sheet(dates, inventories, inject_rates, withdraw_rates,
num_ratch_rows)
reset_ratchets_sheet(new_ratchets_sheet)
stor_type_wgt.value = 'Ratchets'
except Exception as e:
logger.exception(e)
def reset_ratchets_sheet(new_ratchets_sheet):
try:
global ratchet_input_sheet
ratchet_input_sheet = new_ratchets_sheet
storage_details_wgt.children = (storage_common_wgt, ipw.VBox([btn_clear_ratchets_wgt, ratchet_input_sheet]))
except Exception as e:
logger.exception(e)
def on_clear_ratchets_clicked(b):
try:
new_ratchets_sheet = create_ratchets_sheet([], [], [], [], num_ratch_rows)
reset_ratchets_sheet(new_ratchets_sheet)
except Exception as e:
logger.exception(e)
btn_save_storage_details_wgt = ipw.Button(description='Save Storage Details')
btn_save_storage_details_wgt.on_click(on_save_storage_details_clicked)
btn_load_storage_details_wgt = ipw.Button(description='Load Storage Details')
btn_load_storage_details_wgt.on_click(on_load_storage_details_clicked)
btn_clear_ratchets_wgt = ipw.Button(description='Clear Ratchets')
btn_clear_ratchets_wgt.on_click(on_clear_ratchets_clicked)
storage_load_save_hbox = ipw.HBox([btn_load_storage_details_wgt, btn_save_storage_details_wgt])
# Common storage properties
stor_type_wgt = ipw.RadioButtons(options=['Simple', 'Ratchets'], description='Storage Type')
start_wgt = ipw.DatePicker(description='Start')
end_wgt = ipw.DatePicker(description='End')
inj_cost_wgt = ipw.FloatText(description='Injection Cost')
inj_consumed_wgt = ipw.FloatText(description='Inj % Consumed', step=0.001)
with_cost_wgt = ipw.FloatText(description='Withdrw Cost')
with_consumed_wgt = ipw.FloatText(description='With % Consumed', step=0.001)
storage_common_wgt = ipw.VBox([storage_load_save_hbox,
ipw.HBox([ipw.VBox([
start_wgt, end_wgt, inj_cost_wgt, with_cost_wgt]),
ipw.VBox([stor_type_wgt, inj_consumed_wgt, with_consumed_wgt])])])
# Simple storage type properties
invent_min_wgt = ipw.FloatText(description='Min Inventory')
invent_max_wgt = ipw.FloatText(description='Max Inventory')
inj_rate_wgt = ipw.FloatText(description='Injection Rate')
with_rate_wgt = ipw.FloatText(description='Withdrw Rate')
storage_simple_wgt = ipw.VBox([invent_min_wgt, invent_max_wgt, inj_rate_wgt, with_rate_wgt])
# Ratchet storage type properties
ratchet_input_sheet = create_ratchets_sheet([], [], [], [], num_ratch_rows)
# Compose storage
storage_details_wgt = ipw.VBox([storage_common_wgt, storage_simple_wgt])
def on_stor_type_change(change):
if change['new'] == 'Simple':
storage_details_wgt.children = (storage_common_wgt, storage_simple_wgt)
else:
storage_details_wgt.children = (storage_common_wgt, ipw.VBox([btn_clear_ratchets_wgt, ratchet_input_sheet]))
stor_type_wgt.observe(on_stor_type_change, names='value')
# ======================================================================================================
# VOLATILITY PARAMS
def on_load_vol_params_clicked(b):
try:
vol_data_path = select_file_open('Select volatility parameters file', 'CSV File (*.csv)')
if vol_data_path != '':
vol_data_dict = load_csv_to_dict(vol_data_path)
spot_mr_wgt.value = vol_data_dict['spot_mean_reversion']
spot_vol_wgt.value = vol_data_dict['spot_vol']
lt_vol_wgt.value = vol_data_dict['long_term_vol']
seas_vol_wgt.value = vol_data_dict['seasonal_vol']
except Exception as e:
logger.exception(e)
def on_save_vol_params_clicked(b):
try:
vol_params_path = select_file_save('Save volatility parameters to', 'CSV File (*.csv)', 'vol_params.csv')
if vol_params_path != '':
vol_params_dict = vol_data_to_dict()
save_dict_to_csv(vol_params_path, vol_params_dict)
except Exception as e:
logger.exception(e)
btn_load_vol_params_wgt = ipw.Button(description='Load Vol Params')
btn_load_vol_params_wgt.on_click(on_load_vol_params_clicked)
btn_save_vol_params_wgt = ipw.Button(description='Save Vol Params')
btn_save_vol_params_wgt.on_click(on_save_vol_params_clicked)
vol_params_buttons = ipw.HBox([btn_load_vol_params_wgt, btn_save_vol_params_wgt])
spot_vol_wgt = ipw.FloatText(description='Spot Vol', step=0.01)
spot_mr_wgt = ipw.FloatText(description='Spot Mean Rev', step=0.01)
lt_vol_wgt = ipw.FloatText(description='Long Term Vol', step=0.01)
seas_vol_wgt = ipw.FloatText(description='Seasonal Vol', step=0.01)
btn_plot_vol = ipw.Button(description='Plot Forward Vol')
out_vols = ipw.Output()
vol_params_wgt = ipw.HBox([ipw.VBox([vol_params_buttons, spot_vol_wgt, spot_mr_wgt, lt_vol_wgt, seas_vol_wgt,
btn_plot_vol]), out_vols])
def vol_data_to_dict() -> dict:
return {'spot_mean_reversion': spot_mr_wgt.value,
'spot_vol': spot_vol_wgt.value,
'long_term_vol': lt_vol_wgt.value,
'seasonal_vol': seas_vol_wgt.value}
# Plotting vol
def btn_plot_vol_clicked(b):
out_vols.clear_output()
with out_vols:
if val_date_wgt.value is None or end_wgt.value is None:
print('Enter val date and storage end date.')
return
vol_model = MultiFactorModel.for_3_factor_seasonal(freq, spot_mr_wgt.value, spot_vol_wgt.value,
lt_vol_wgt.value, seas_vol_wgt.value, val_date_wgt.value,
end_wgt.value)
start_vol_period = pd.Period(val_date_wgt.value, freq=freq)
end_vol_period = start_vol_period + 1
periods = pd.period_range(start=end_vol_period, end=end_wgt.value, freq=freq)
fwd_vols = [vol_model.integrated_vol(start_vol_period, end_vol_period, period) for period in periods]
fwd_vol_series = pd.Series(data=fwd_vols, index=periods)
fwd_vol_series.plot()
show_inline_matplotlib_plots()
btn_plot_vol.on_click(btn_plot_vol_clicked)
# ======================================================================================================
# TECHNICAL PARAMETERS
def on_load_tech_params(b):
try:
tech_params_path = select_file_open('Select technical params file', 'CSV File (*.csv)')
if tech_params_path != '':
tech_params_dict = load_csv_to_dict(tech_params_path)
num_sims_wgt.value = tech_params_dict['num_sims']
basis_funcs_input_wgt.value = tech_params_dict['basis_funcs']
random_seed_wgt.value = tech_params_dict['seed']
seed_is_random_wgt.value = str_to_bool(tech_params_dict['seed_is_random'])
fwd_sim_seed_wgt.value = tech_params_dict['fwd_sim_seed']
fwd_sim_seed_set_wgt.value = str_to_bool(tech_params_dict['set_fwd_sim_seed'])
extra_decisions_wgt.value = tech_params_dict['extra_decisions']
grid_points_wgt.value = tech_params_dict['num_inventory_grid_points']
num_tol_wgt.value = tech_params_dict['numerical_tolerance']
except Exception as e:
logger.exception(e)
def on_save_tech_params(b):
try:
tech_params_path = select_file_save('Save technical params to', 'CSV File (*.csv)', 'tech_params.csv')
if tech_params_path != '':
tech_params_dict = tech_params_to_dict()
save_dict_to_csv(tech_params_path, tech_params_dict)
except Exception as e:
logger.exception(e)
btn_load_tech_params = ipw.Button(description='Load Tech Params')
btn_load_tech_params.on_click(on_load_tech_params)
btn_save_tech_params = ipw.Button(description='Save Tech Params')
btn_save_tech_params.on_click(on_save_tech_params)
tech_params_buttons = ipw.HBox([btn_load_tech_params, btn_save_tech_params])
num_sims_wgt = ipw.IntText(description='Num Sims', value=4000, step=500)
extra_decisions_wgt = ipw.IntText(description='Extra Decisions', value=0, step=1)
seed_is_random_wgt = ipw.Checkbox(description='Seed is Random', value=False)
random_seed_wgt = ipw.IntText(description='Seed', value=11)
fwd_sim_seed_set_wgt = ipw.Checkbox(description='Set fwd sim seed', value=True)
fwd_sim_seed_wgt = ipw.IntText(description='Fwd Sim Seed', value=13, disabled=False)
grid_points_wgt = ipw.IntText(description='Grid Points', value=100, step=10)
basis_funcs_label_wgt = ipw.Label('Basis Functions')
basis_funcs_legend_wgt = ipw.VBox([ipw.Label('1=Constant'),
ipw.Label('s=Spot Price'),
ipw.Label('x_st=Short-term Factor'),
ipw.Label('x_sw=Sum/Win Factor'),
ipw.Label('x_lt=Long-term Factor')])
basis_funcs_input_wgt = ipw.Textarea(
value='1 + x_st + x_sw + x_lt + x_st**2 + x_sw**2 + x_lt**2 + x_st**3 + x_sw**3 + x_lt**3',
layout=ipw.Layout(width='95%', height='95%'))
basis_func_wgt = ipw.HBox([ipw.VBox([basis_funcs_label_wgt, basis_funcs_legend_wgt]), basis_funcs_input_wgt])
num_tol_wgt = ipw.FloatText(description='Numerical Tol', value=1E-10, step=1E-9)
def on_seed_is_random_change(change):
random_seed_wgt.disabled = change['new']
seed_is_random_wgt.observe(on_seed_is_random_change, names='value')
def on_fwd_sim_seed_set_change(change):
fwd_sim_seed_wgt.disabled = not change['new']
fwd_sim_seed_set_wgt.observe(on_fwd_sim_seed_set_change, names='value')
tech_params_wgt = ipw.VBox([tech_params_buttons, ipw.HBox(
[ipw.VBox([num_sims_wgt, extra_decisions_wgt, seed_is_random_wgt, random_seed_wgt, fwd_sim_seed_set_wgt,
fwd_sim_seed_wgt, grid_points_wgt, num_tol_wgt]), basis_func_wgt])])
def tech_params_to_dict() -> dict:
return {'num_sims': num_sims_wgt.value,
'basis_funcs': basis_funcs_input_wgt.value,
'seed': random_seed_wgt.value,
'seed_is_random': seed_is_random_wgt.value,
'fwd_sim_seed': fwd_sim_seed_wgt.value,
'set_fwd_sim_seed': fwd_sim_seed_set_wgt.value,
'extra_decisions': extra_decisions_wgt.value,
'num_inventory_grid_points': grid_points_wgt.value,
'numerical_tolerance': num_tol_wgt.value}
# ======================================================================================================
# COMPOSE INPUT TABS
tab_in_titles = ['Valuation Data', 'Forward Curve', 'Storage Details', 'Volatility Params', 'Technical Params']
tab_in_children = [val_inputs_wgt, fwd_data_wgt, storage_details_wgt, vol_params_wgt, tech_params_wgt]
tab_in = create_tab(tab_in_titles, tab_in_children)
# ======================================================================================================
# OUTPUT WIDGETS
progress_wgt = ipw.FloatProgress(min=0.0, max=1.0)
full_value_wgt = ipw.Text(description='Full Value', disabled=True)
intr_value_wgt = ipw.Text(description='Intr. Value', disabled=True)
extr_value_wgt = ipw.Text(description='Extr. Value', disabled=True)
value_wgts = [full_value_wgt, intr_value_wgt, extr_value_wgt]
values_wgt = ipw.VBox(value_wgts)
out_summary = ipw.Output()
summary_vbox = ipw.HBox([values_wgt, out_summary])
sheet_out_layout = {
'width': '100%',
'height': '300px',
'overflow_y': 'auto'}
out_triggers_plot = ipw.Output()
# Buttons to export table results
def create_deltas_dataframe():
return pd.DataFrame(index=val_results_3f.deltas.index,
data={'full_delta': val_results_3f.deltas,
'intrinsic_delta': intr_delta})
def create_triggers_dataframe():
trigger_prices_frame = val_results_3f.trigger_prices.copy()
trigger_prices_frame['expected_inventory'] = val_results_3f.expected_profile['inventory']
trigger_prices_frame['fwd_price'] = active_fwd_curve
return trigger_prices_frame
def on_export_summary_click(b):
try:
csv_path = select_file_save('Save table to', 'CSV File (*.csv)', 'storage_deltas.csv')
if csv_path != '':
deltas_frame = create_deltas_dataframe()
deltas_frame.to_csv(csv_path)
except Exception as e:
logger.exception(e)
def on_export_triggers_click(b):
try:
csv_path = select_file_save('Save table to', 'CSV File (*.csv)', 'trigger_prices.csv')
if csv_path != '':
triggers_frame = create_triggers_dataframe()
triggers_frame.to_csv(csv_path)
except Exception as e:
logger.exception(e)
btn_export_summary_wgt = ipw.Button(description='Export Data', disabled=True)
btn_export_summary_wgt.on_click(on_export_summary_click)
btn_export_triggers_wgt = ipw.Button(description='Export Data', disabled=True)
btn_export_triggers_wgt.on_click(on_export_triggers_click)
tab_out_titles = ['Summary', 'Summary Table', 'Trigger Prices Chart', 'Trigger Prices Table']
tab_out_children = [summary_vbox, btn_export_summary_wgt, out_triggers_plot, btn_export_triggers_wgt]
tab_output = create_tab(tab_out_titles, tab_out_children)
def set_tab_output_child(child_index, new_child):
child_list = list(tab_output.children)
child_list[child_index] = new_child
tab_output.children = tuple(child_list)
def on_progress(progress):
progress_wgt.value = progress
# Inputs Not Defined in GUI
def twentieth_of_next_month(period): return period.asfreq('M').asfreq('D', 'end') + 20
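# Worked example for twentieth_of_next_month above: Period('2024-03-15', 'D').asfreq('M') -> 2024-03,
# .asfreq('D', 'end') -> 2024-03-31, + 20 -> 2024-04-20, i.e. the 20th of the following month,
# which is used below as the end of the interest-rate curve index.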
def enumerate_fwd_points():
fwd_row = 0
while fwd_input_sheet.cells[0].value[fwd_row] is not None and fwd_input_sheet.cells[0].value[fwd_row] != '':
fwd_start = fwd_input_sheet.cells[0].value[fwd_row]
fwd_price = fwd_input_sheet.cells[1].value[fwd_row]
yield fwd_start, fwd_price
fwd_row += 1
if fwd_row == num_fwd_rows:
break
def read_fwd_curve():
fwd_periods = []
fwd_prices = []
for fwd_start, fwd_price in enumerate_fwd_points():
fwd_periods.append(pd.Period(fwd_start, freq=freq))
fwd_prices.append(fwd_price)
if smooth_curve_wgt.value:
p1, p2 = itertools.tee(fwd_periods)
next(p2, None)
contracts = []
for start, end, price in zip(p1, p2, fwd_prices):
contracts.append((start, end - 1, price))
weekend_adjust = None
if apply_wkend_shaping_wgt.value:
wkend_factor = wkend_factor_wgt.value
weekend_adjust = adjustments.dayofweek(default=1.0, saturday=wkend_factor, sunday=wkend_factor)
return max_smooth_interp(contracts, freq=freq, mult_season_adjust=weekend_adjust)
else:
return pd.Series(fwd_prices, pd.PeriodIndex(fwd_periods)).resample(freq).fillna('pad')
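# read_fwd_curve above has two branches: with smoothing on, consecutive forward points are paired into
# (start, end, price) contracts and passed to max_smooth_interp (optionally weekend-shaped); otherwise
# the points become a simple daily step curve via resample(freq).fillna('pad').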
def btn_clicked(b):
progress_wgt.value = 0.0
for vw in value_wgts:
vw.value = ''
btn_calculate.disabled = True
out_summary.clear_output()
out_triggers_plot.clear_output()
try:
global fwd_curve
logger.debug('Reading forward curve.')
fwd_curve = read_fwd_curve()
logger.debug('Forward curve read successfully.')
global storage
global val_results_3f
if stor_type_wgt.value == 'Simple':
storage = CmdtyStorage(freq, storage_start=start_wgt.value, storage_end=end_wgt.value,
injection_cost=inj_cost_wgt.value, withdrawal_cost=with_cost_wgt.value,
min_inventory=invent_min_wgt.value, max_inventory=invent_max_wgt.value,
max_injection_rate=inj_rate_wgt.value, max_withdrawal_rate=with_rate_wgt.value,
cmdty_consumed_inject=inj_consumed_wgt.value,
cmdty_consumed_withdraw=with_consumed_wgt.value)
else:
ratchets = read_ratchets()
storage = CmdtyStorage(freq, storage_start=start_wgt.value, storage_end=end_wgt.value,
injection_cost=inj_cost_wgt.value, withdrawal_cost=with_cost_wgt.value,
ratchets=ratchets, ratchet_interp=RatchetInterp.LINEAR)
interest_rate_curve = pd.Series(index=pd.period_range(val_date_wgt.value,
twentieth_of_next_month(
| pd.Period(end_wgt.value, freq='D') | pandas.Period |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import UserList
import io
import pathlib
import pytest
import socket
import threading
import weakref
import numpy as np
import pyarrow as pa
from pyarrow.tests.util import changed_environ
try:
from pandas.testing import assert_frame_equal, assert_series_equal
import pandas as pd
except ImportError:
pass
class IpcFixture:
write_stats = None
def __init__(self, sink_factory=lambda: io.BytesIO()):
self._sink_factory = sink_factory
self.sink = self.get_sink()
def get_sink(self):
return self._sink_factory()
def get_source(self):
return self.sink.getvalue()
def write_batches(self, num_batches=5, as_table=False):
nrows = 5
schema = pa.schema([('one', pa.float64()), ('two', pa.utf8())])
writer = self._get_writer(self.sink, schema)
batches = []
for i in range(num_batches):
batch = pa.record_batch(
[np.random.randn(nrows),
['foo', None, 'bar', 'bazbaz', 'qux']],
schema=schema)
batches.append(batch)
if as_table:
table = pa.Table.from_batches(batches)
writer.write_table(table)
else:
for batch in batches:
writer.write_batch(batch)
self.write_stats = writer.stats
writer.close()
return batches
class FileFormatFixture(IpcFixture):
is_file = True
options = None
def _get_writer(self, sink, schema):
return pa.ipc.new_file(sink, schema, options=self.options)
def _check_roundtrip(self, as_table=False):
batches = self.write_batches(as_table=as_table)
file_contents = pa.BufferReader(self.get_source())
reader = pa.ipc.open_file(file_contents)
assert reader.num_record_batches == len(batches)
for i, batch in enumerate(batches):
# it works. Must convert back to DataFrame
batch = reader.get_batch(i)
assert batches[i].equals(batch)
assert reader.schema.equals(batches[0].schema)
assert isinstance(reader.stats, pa.ipc.ReadStats)
assert isinstance(self.write_stats, pa.ipc.WriteStats)
assert tuple(reader.stats) == tuple(self.write_stats)
class StreamFormatFixture(IpcFixture):
# ARROW-6474, for testing writing old IPC protocol with 4-byte prefix
use_legacy_ipc_format = False
# ARROW-9395, for testing writing old metadata version
options = None
is_file = False
def _get_writer(self, sink, schema):
return pa.ipc.new_stream(
sink,
schema,
use_legacy_format=self.use_legacy_ipc_format,
options=self.options,
)
class MessageFixture(IpcFixture):
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def ipc_fixture():
return IpcFixture()
@pytest.fixture
def file_fixture():
return FileFormatFixture()
@pytest.fixture
def stream_fixture():
return StreamFormatFixture()
@pytest.fixture(params=[
pytest.param(
pytest.lazy_fixture('file_fixture'),
id='File Format'
),
pytest.param(
pytest.lazy_fixture('stream_fixture'),
id='Stream Format'
)
])
def format_fixture(request):
return request.param
def test_empty_file():
buf = b''
with pytest.raises(pa.ArrowInvalid):
pa.ipc.open_file(pa.BufferReader(buf))
def test_file_simple_roundtrip(file_fixture):
file_fixture._check_roundtrip(as_table=False)
def test_file_write_table(file_fixture):
file_fixture._check_roundtrip(as_table=True)
@pytest.mark.parametrize("sink_factory", [
lambda: io.BytesIO(),
lambda: pa.BufferOutputStream()
])
def test_file_read_all(sink_factory):
fixture = FileFormatFixture(sink_factory)
batches = fixture.write_batches()
file_contents = pa.BufferReader(fixture.get_source())
reader = pa.ipc.open_file(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
def test_open_file_from_buffer(file_fixture):
# ARROW-2859; APIs accept the buffer protocol
file_fixture.write_batches()
source = file_fixture.get_source()
reader1 = pa.ipc.open_file(source)
reader2 = pa.ipc.open_file(pa.BufferReader(source))
reader3 = pa.RecordBatchFileReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
st1 = reader1.stats
assert st1.num_messages == 6
assert st1.num_record_batches == 5
assert reader2.stats == st1
assert reader3.stats == st1
@pytest.mark.pandas
def test_file_read_pandas(file_fixture):
frames = [batch.to_pandas() for batch in file_fixture.write_batches()]
file_contents = pa.BufferReader(file_fixture.get_source())
reader = pa.ipc.open_file(file_contents)
result = reader.read_pandas()
expected = pd.concat(frames).reset_index(drop=True)
assert_frame_equal(result, expected)
def test_file_pathlib(file_fixture, tmpdir):
file_fixture.write_batches()
source = file_fixture.get_source()
path = tmpdir.join('file.arrow').strpath
with open(path, 'wb') as f:
f.write(source)
t1 = pa.ipc.open_file(pathlib.Path(path)).read_all()
t2 = pa.ipc.open_file(pa.OSFile(path)).read_all()
assert t1.equals(t2)
def test_empty_stream():
buf = io.BytesIO(b'')
with pytest.raises(pa.ArrowInvalid):
pa.ipc.open_stream(buf)
@pytest.mark.pandas
def test_stream_categorical_roundtrip(stream_fixture):
df = pd.DataFrame({
'one': np.random.randn(5),
'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
categories=['foo', 'bar'],
ordered=True)
})
batch = pa.RecordBatch.from_pandas(df)
with stream_fixture._get_writer(stream_fixture.sink, batch.schema) as wr:
wr.write_batch(batch)
table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
.read_all())
assert_frame_equal(table.to_pandas(), df)
def test_open_stream_from_buffer(stream_fixture):
# ARROW-2859
stream_fixture.write_batches()
source = stream_fixture.get_source()
reader1 = pa.ipc.open_stream(source)
reader2 = pa.ipc.open_stream(pa.BufferReader(source))
reader3 = pa.RecordBatchStreamReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
st1 = reader1.stats
assert st1.num_messages == 6
assert st1.num_record_batches == 5
assert reader2.stats == st1
assert reader3.stats == st1
assert tuple(st1) == tuple(stream_fixture.write_stats)
@pytest.mark.pandas
def test_stream_write_dispatch(stream_fixture):
# ARROW-1616
df = pd.DataFrame({
'one': np.random.randn(5),
'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
categories=['foo', 'bar'],
ordered=True)
})
table = pa.Table.from_pandas(df, preserve_index=False)
batch = pa.RecordBatch.from_pandas(df, preserve_index=False)
with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr:
wr.write(table)
wr.write(batch)
table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
.read_all())
assert_frame_equal(table.to_pandas(),
pd.concat([df, df], ignore_index=True))
@pytest.mark.pandas
def test_stream_write_table_batches(stream_fixture):
# ARROW-504
df = pd.DataFrame({
'one': np.random.randn(20),
})
b1 = pa.RecordBatch.from_pandas(df[:10], preserve_index=False)
b2 = pa.RecordBatch.from_pandas(df, preserve_index=False)
table = pa.Table.from_batches([b1, b2, b1])
with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr:
wr.write_table(table, max_chunksize=15)
batches = list(pa.ipc.open_stream(stream_fixture.get_source()))
assert list(map(len, batches)) == [10, 15, 5, 10]
result_table = pa.Table.from_batches(batches)
assert_frame_equal(result_table.to_pandas(),
pd.concat([df[:10], df, df[:10]],
ignore_index=True))
@pytest.mark.parametrize('use_legacy_ipc_format', [False, True])
def test_stream_simple_roundtrip(stream_fixture, use_legacy_ipc_format):
stream_fixture.use_legacy_ipc_format = use_legacy_ipc_format
batches = stream_fixture.write_batches()
file_contents = pa.BufferReader(stream_fixture.get_source())
reader = pa.ipc.open_stream(file_contents)
assert reader.schema.equals(batches[0].schema)
total = 0
for i, next_batch in enumerate(reader):
assert next_batch.equals(batches[i])
total += 1
assert total == len(batches)
with pytest.raises(StopIteration):
reader.read_next_batch()
@pytest.mark.zstd
def test_compression_roundtrip():
sink = io.BytesIO()
values = np.random.randint(0, 3, 10000)
table = pa.Table.from_arrays([values], names=["values"])
options = pa.ipc.IpcWriteOptions(compression='zstd')
with pa.ipc.RecordBatchFileWriter(
sink, table.schema, options=options) as writer:
writer.write_table(table)
len1 = len(sink.getvalue())
sink2 = io.BytesIO()
codec = pa.Codec('zstd', compression_level=5)
options = pa.ipc.IpcWriteOptions(compression=codec)
with pa.ipc.RecordBatchFileWriter(
sink2, table.schema, options=options) as writer:
writer.write_table(table)
len2 = len(sink2.getvalue())
# In theory len2 should be less than len1, but for this test we just want
# to ensure compression_level is being correctly passed down to the C++
# layer, so we don't really care whether it makes the compression worse or better
assert len2 != len1
t1 = pa.ipc.open_file(sink).read_all()
t2 = pa.ipc.open_file(sink2).read_all()
assert t1 == t2
def test_write_options():
options = pa.ipc.IpcWriteOptions()
assert options.allow_64bit is False
assert options.use_legacy_format is False
assert options.metadata_version == pa.ipc.MetadataVersion.V5
options.allow_64bit = True
assert options.allow_64bit is True
options.use_legacy_format = True
assert options.use_legacy_format is True
options.metadata_version = pa.ipc.MetadataVersion.V4
assert options.metadata_version == pa.ipc.MetadataVersion.V4
for value in ('V5', 42):
with pytest.raises((TypeError, ValueError)):
options.metadata_version = value
assert options.compression is None
for value in ['lz4', 'zstd']:
if pa.Codec.is_available(value):
options.compression = value
assert options.compression == value
options.compression = value.upper()
assert options.compression == value
options.compression = None
assert options.compression is None
with pytest.raises(TypeError):
options.compression = 0
assert options.use_threads is True
options.use_threads = False
assert options.use_threads is False
if pa.Codec.is_available('lz4'):
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4,
allow_64bit=True,
use_legacy_format=True,
compression='lz4',
use_threads=False)
assert options.metadata_version == pa.ipc.MetadataVersion.V4
assert options.allow_64bit is True
assert options.use_legacy_format is True
assert options.compression == 'lz4'
assert options.use_threads is False
def test_write_options_legacy_exclusive(stream_fixture):
with pytest.raises(
ValueError,
match="provide at most one of options and use_legacy_format"):
stream_fixture.use_legacy_ipc_format = True
stream_fixture.options = pa.ipc.IpcWriteOptions()
stream_fixture.write_batches()
@pytest.mark.parametrize('options', [
pa.ipc.IpcWriteOptions(),
pa.ipc.IpcWriteOptions(allow_64bit=True),
pa.ipc.IpcWriteOptions(use_legacy_format=True),
pa.ipc.IpcWriteOptions(metadata_version=pa.ipc.MetadataVersion.V4),
pa.ipc.IpcWriteOptions(use_legacy_format=True,
metadata_version=pa.ipc.MetadataVersion.V4),
])
def test_stream_options_roundtrip(stream_fixture, options):
stream_fixture.use_legacy_ipc_format = None
stream_fixture.options = options
batches = stream_fixture.write_batches()
file_contents = pa.BufferReader(stream_fixture.get_source())
message = pa.ipc.read_message(stream_fixture.get_source())
assert message.metadata_version == options.metadata_version
reader = pa.ipc.open_stream(file_contents)
assert reader.schema.equals(batches[0].schema)
total = 0
for i, next_batch in enumerate(reader):
assert next_batch.equals(batches[i])
total += 1
assert total == len(batches)
with pytest.raises(StopIteration):
reader.read_next_batch()
def test_dictionary_delta(format_fixture):
ty = pa.dictionary(pa.int8(), pa.utf8())
data = [["foo", "foo", None],
["foo", "bar", "foo"], # potential delta
["foo", "bar"], # nothing new
["foo", None, "bar", "quux"], # potential delta
["bar", "quux"], # replacement
]
batches = [
pa.RecordBatch.from_arrays([pa.array(v, type=ty)], names=['dicts'])
for v in data]
batches_delta_only = batches[:4]
schema = batches[0].schema
def write_batches(batches, as_table=False):
with format_fixture._get_writer(pa.MockOutputStream(),
schema) as writer:
if as_table:
table = pa.Table.from_batches(batches)
writer.write_table(table)
else:
for batch in batches:
writer.write_batch(batch)
return writer.stats
if format_fixture.is_file:
# File format cannot handle replacement
with pytest.raises(pa.ArrowInvalid):
write_batches(batches)
# File format cannot handle deltas if emit_dictionary_deltas
# is not provided
with pytest.raises(pa.ArrowInvalid):
write_batches(batches_delta_only)
else:
st = write_batches(batches)
assert st.num_record_batches == 5
assert st.num_dictionary_batches == 4
assert st.num_replaced_dictionaries == 3
assert st.num_dictionary_deltas == 0
format_fixture.use_legacy_ipc_format = None
format_fixture.options = pa.ipc.IpcWriteOptions(
emit_dictionary_deltas=True)
if format_fixture.is_file:
# File format cannot handle replacement
with pytest.raises(pa.ArrowInvalid):
write_batches(batches)
else:
st = write_batches(batches)
assert st.num_record_batches == 5
assert st.num_dictionary_batches == 4
assert st.num_replaced_dictionaries == 1
assert st.num_dictionary_deltas == 2
st = write_batches(batches_delta_only)
assert st.num_record_batches == 4
assert st.num_dictionary_batches == 3
assert st.num_replaced_dictionaries == 0
assert st.num_dictionary_deltas == 2
format_fixture.options = pa.ipc.IpcWriteOptions(
unify_dictionaries=True
)
st = write_batches(batches, as_table=True)
assert st.num_record_batches == 5
if format_fixture.is_file:
assert st.num_dictionary_batches == 1
assert st.num_replaced_dictionaries == 0
assert st.num_dictionary_deltas == 0
else:
assert st.num_dictionary_batches == 4
assert st.num_replaced_dictionaries == 3
assert st.num_dictionary_deltas == 0
def test_envvar_set_legacy_ipc_format():
schema = pa.schema([pa.field('foo', pa.int32())])
writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
assert not writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V5
writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
assert not writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V5
with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'):
writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
assert writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V5
writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
assert writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V5
with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'):
writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
assert not writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V4
writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
assert not writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V4
with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'):
with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'):
writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
assert writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V4
writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
assert writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V4
def test_stream_read_all(stream_fixture):
batches = stream_fixture.write_batches()
file_contents = pa.BufferReader(stream_fixture.get_source())
reader = pa.ipc.open_stream(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
@pytest.mark.pandas
def test_stream_read_pandas(stream_fixture):
frames = [batch.to_pandas() for batch in stream_fixture.write_batches()]
file_contents = stream_fixture.get_source()
reader = pa.ipc.open_stream(file_contents)
result = reader.read_pandas()
expected = pd.concat(frames).reset_index(drop=True)
assert_frame_equal(result, expected)
@pytest.fixture
def example_messages(stream_fixture):
batches = stream_fixture.write_batches()
file_contents = stream_fixture.get_source()
buf_reader = pa.BufferReader(file_contents)
reader = pa.MessageReader.open_stream(buf_reader)
return batches, list(reader)
def test_message_ctors_no_segfault():
with pytest.raises(TypeError):
repr(pa.Message())
with pytest.raises(TypeError):
repr(pa.MessageReader())
def test_message_reader(example_messages):
_, messages = example_messages
assert len(messages) == 6
assert messages[0].type == 'schema'
assert isinstance(messages[0].metadata, pa.Buffer)
assert isinstance(messages[0].body, pa.Buffer)
assert messages[0].metadata_version == pa.MetadataVersion.V5
for msg in messages[1:]:
assert msg.type == 'record batch'
assert isinstance(msg.metadata, pa.Buffer)
assert isinstance(msg.body, pa.Buffer)
assert msg.metadata_version == pa.MetadataVersion.V5
def test_message_serialize_read_message(example_messages):
_, messages = example_messages
msg = messages[0]
buf = msg.serialize()
reader = pa.BufferReader(buf.to_pybytes() * 2)
restored = pa.ipc.read_message(buf)
restored2 = pa.ipc.read_message(reader)
restored3 = pa.ipc.read_message(buf.to_pybytes())
restored4 = pa.ipc.read_message(reader)
assert msg.equals(restored)
assert msg.equals(restored2)
assert msg.equals(restored3)
assert msg.equals(restored4)
with pytest.raises(pa.ArrowInvalid, match="Corrupted message"):
pa.ipc.read_message(pa.BufferReader(b'ab'))
with pytest.raises(EOFError):
pa.ipc.read_message(reader)
@pytest.mark.gzip
def test_message_read_from_compressed(example_messages):
# Part of ARROW-5910
_, messages = example_messages
for message in messages:
raw_out = pa.BufferOutputStream()
with pa.output_stream(raw_out, compression='gzip') as compressed_out:
message.serialize_to(compressed_out)
compressed_buf = raw_out.getvalue()
result = pa.ipc.read_message(pa.input_stream(compressed_buf,
compression='gzip'))
assert result.equals(message)
def test_message_read_record_batch(example_messages):
batches, messages = example_messages
for batch, message in zip(batches, messages[1:]):
read_batch = pa.ipc.read_record_batch(message, batch.schema)
assert read_batch.equals(batch)
def test_read_record_batch_on_stream_error_message():
# ARROW-5374
batch = pa.record_batch([pa.array([b"foo"], type=pa.utf8())],
names=['strs'])
stream = pa.BufferOutputStream()
with pa.ipc.new_stream(stream, batch.schema) as writer:
writer.write_batch(batch)
buf = stream.getvalue()
with pytest.raises(IOError,
match="type record batch but got schema"):
pa.ipc.read_record_batch(buf, batch.schema)
# ----------------------------------------------------------------------
# Socket streaming tests
class StreamReaderServer(threading.Thread):
def init(self, do_read_all):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.bind(('127.0.0.1', 0))
self._sock.listen(1)
host, port = self._sock.getsockname()
self._do_read_all = do_read_all
self._schema = None
self._batches = []
self._table = None
return port
def run(self):
connection, client_address = self._sock.accept()
try:
source = connection.makefile(mode='rb')
reader = pa.ipc.open_stream(source)
self._schema = reader.schema
if self._do_read_all:
self._table = reader.read_all()
else:
for i, batch in enumerate(reader):
self._batches.append(batch)
finally:
connection.close()
def get_result(self):
return (self._schema, self._table if self._do_read_all
else self._batches)
class SocketStreamFixture(IpcFixture):
def __init__(self):
# XXX(wesm): test will decide when to start socket server. This should
# probably be refactored
pass
def start_server(self, do_read_all):
self._server = StreamReaderServer()
port = self._server.init(do_read_all)
self._server.start()
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect(('127.0.0.1', port))
self.sink = self.get_sink()
def stop_and_get_result(self):
import struct
self.sink.write(struct.pack('Q', 0))
self.sink.flush()
self._sock.close()
self._server.join()
return self._server.get_result()
def get_sink(self):
return self._sock.makefile(mode='wb')
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def socket_fixture():
return SocketStreamFixture()
def test_socket_simple_roundtrip(socket_fixture):
socket_fixture.start_server(do_read_all=False)
writer_batches = socket_fixture.write_batches()
reader_schema, reader_batches = socket_fixture.stop_and_get_result()
assert reader_schema.equals(writer_batches[0].schema)
assert len(reader_batches) == len(writer_batches)
for i, batch in enumerate(writer_batches):
assert reader_batches[i].equals(batch)
def test_socket_read_all(socket_fixture):
socket_fixture.start_server(do_read_all=True)
writer_batches = socket_fixture.write_batches()
_, result = socket_fixture.stop_and_get_result()
expected = pa.Table.from_batches(writer_batches)
assert result.equals(expected)
# ----------------------------------------------------------------------
# Miscellaneous IPC tests
@pytest.mark.pandas
def test_ipc_file_stream_has_eos():
# ARROW-5395
df = pd.DataFrame({'foo': [1.5]})
batch = pa.RecordBatch.from_pandas(df)
sink = pa.BufferOutputStream()
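# write_file() is a small helper defined elsewhere in the original pyarrow test module (not shown in
# this excerpt); it is assumed to write the batch with a RecordBatchFileWriter, so the buffer starts
# with the file magic that is skipped below.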
write_file(batch, sink)
buffer = sink.getvalue()
# skip the file magic
reader = pa.ipc.open_stream(buffer[8:])
# will fail if encounters footer data instead of eos
rdf = reader.read_pandas()
| assert_frame_equal(df, rdf) | pandas.testing.assert_frame_equal |
import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal
import numpy as np
from easyframes.easyframes import hhkit
class TestStataMerge(unittest.TestCase):
def setUp(self):
"""
df_original = pd.read_csv('sample_hh_dataset.csv')
df = df_original.copy()
print(df.to_dict())
"""
self.df_master = pd.DataFrame(
{'educ': {0: 'secondary', 1: 'bachelor', 2: 'primary', 3: 'higher', 4: 'bachelor', 5: 'secondary',
6: 'higher', 7: 'higher', 8: 'primary', 9: 'primary'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 3, 6: 4, 7: 4, 8: 4, 9: 4},
'id': {0: 1, 1: 2, 2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 3, 9: 4},
'has_car': {0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1, 9: 1},
'weighthh': {0: 2, 1: 2, 2: 2, 3: 3, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3, 9: 3},
'house_rooms': {0: 3, 1: 3, 2: 3, 3: 2, 4: 1, 5: 1, 6: 3, 7: 3, 8: 3, 9: 3},
'prov': {0: 'BC', 1: 'BC', 2: 'BC', 3: 'Alberta', 4: 'BC', 5: 'BC', 6: 'Alberta',
7: 'Alberta', 8: 'Alberta', 9: 'Alberta'},
'age': {0: 44, 1: 43, 2: 13, 3: 70, 4: 23, 5: 20, 6: 37, 7: 35, 8: 8, 9: 15},
'fridge': {0: 'yes', 1: 'yes', 2: 'yes', 3: 'no', 4: 'yes', 5: 'yes', 6: 'no',
7: 'no', 8: 'no', 9: 'no'},
'male': {0: 1, 1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 0, 9: 0}})
self.df_using_hh = pd.DataFrame(
{'hh': {0: 2, 1: 4, 2: 5, 3: 6, 4: 7},
'has_fence': {0: 1, 1: 0, 2: 1, 3: 1, 4: 0}
})
self.df_using_ind = pd.DataFrame(
{'empl': {0: 'not employed', 1: 'full-time', 2: 'part-time', 3: 'part-time', 4: 'full-time', 5: 'part-time',
6: 'self-employed', 7: 'full-time', 8: 'self-employed'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 5, 5: 5, 6: 4, 7: 4, 8: 4},
'id': {0: 1, 1: 2, 2: 4, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 5}
})
# @unittest.skip("demonstrating skipping")
def test_new_columns_added_merging_hh_level(self):
myhhkit = hhkit(self.df_master)
# myhhkit.from_dict(self.df_master)
myhhkit_using_hh = hhkit(self.df_using_hh)
# myhhkit_using_hh.from_dict(self.df_using_hh)
myhhkit.statamerge(myhhkit_using_hh, on=['hh'])
list_of_columns = myhhkit.df.columns.values.tolist()
self.assertIn('has_fence',list_of_columns)
# also check that the values are correct
correct_values = pd.Series([np.nan, np.nan, np.nan, 1, np.nan, np.nan, 0, 0, 0, 0, 1, 1, 0])
assert_series_equal(correct_values, myhhkit.df['has_fence'])
# @unittest.skip("demonstrating skipping")
def test_new_columns_added_merging_ind_level(self):
myhhkit = hhkit(self.df_master)
# myhhkit.from_dict(self.df_master)
myhhkit_using_ind = hhkit(self.df_using_ind)
# myhhkit_using_ind.from_dict(self.df_using_ind)
myhhkit.statamerge(myhhkit_using_ind, on=['hh','id'])
list_of_columns = myhhkit.df.columns.values.tolist()
self.assertIn('empl',list_of_columns)
# also check that the values are correct
correct_values = pd.Series(['not employed', 'full-time', np.nan, 'part-time', np.nan, np.nan,
'self-employed', 'full-time', np.nan, np.nan, 'part-time', 'full-time', 'part-time', 'self-employed'])
assert_series_equal(correct_values, myhhkit.df['empl'])
# @unittest.skip("demonstrating skipping")
def test_check_proper_merged_variable_created_and_is_correct_hh_level(self):
myhhkit = hhkit(self.df_master)
# myhhkit.from_dict(self.df_master)
myhhkit_using_hh = hhkit(self.df_using_hh)
# myhhkit_using_hh.from_dict(self.df_using_hh)
correct_values = pd.Series([1, 1, 1, 3, 1, 1, 3, 3, 3, 3, 2, 2, 2])
myhhkit.statamerge(myhhkit_using_hh, on=['hh'], mergevarname='_merge_hh')
assert_series_equal(correct_values, myhhkit.df['_merge_hh'])
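# The _merge codes checked in these two tests follow the Stata convention: 1 = row only in the
# master dataset, 2 = row only in the using dataset, 3 = matched in both.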
def test_check_proper_merged_variable_created_and_is_correct_ind_level(self):
myhhkit = hhkit(self.df_master)
# myhhkit.from_dict(self.df_master)
myhhkit_using_ind = hhkit(self.df_using_ind)
# myhhkit_using_ind.from_dict(self.df_using_ind)
correct_values = | pd.Series([3, 3, 1, 3, 1, 1, 3, 3, 1, 1, 2, 2, 2, 2]) | pandas.Series |
from openpyxl import Workbook
from openpyxl.cell.cell import Cell
from openpyxl.styles import Alignment, Font, PatternFill, Border, Side
from openpyxl.utils.dataframe import dataframe_to_rows
import pandas as pd
import re
import os
from _Classes.PyscopusModified import ScopusModified
from _Funções_e_Valores.verify_authors import search_authors_list, treat_exceptions
from _Funções_e_Valores.values import ND, EXCEL_FILE_NAME, HAS_EVENTS, FILE
from _Classes.Graph import Graphs, Graphs_Proceedings_Journals
class ExcelFile(Workbook):
def __init__(self, data):
super(ExcelFile, self).__init__()
self.reports = data.reports
self.authors = pd.DataFrame(data.authors_dict)
self.art_prof = data.art_prof
self.artppg = data.artppg
self.averages = data.authors_average
self.irestritos_2016 = data.irestritos_2016
self.igerais_2016 = data.igerais_2016
self.irestritos_2019 = data.irestritos_2019
self.igerais_2019 = data.igerais_2019
self.authors_indicators_2016 = data.authors_indicators_2016
self.authors_indicators_2019 = data.authors_indicators_2019
self.general_indicators_2016 = data.general_indicators_2016
self.general_indicators_2019 = data.general_indicators_2019
self.authors_indicators_2016_journals = data.authors_indicators_2016_journals
self.authors_indicators_2019_journals = data.authors_indicators_2019_journals
self.general_indicators_2016_journals = data.general_indicators_2016_journals
self.general_indicators_2019_journals = data.general_indicators_2019_journals
self.authors_indicators_2016_proceedings = data.authors_indicators_2016_proceedings
self.authors_indicators_2019_proceedings = data.authors_indicators_2019_proceedings
self.general_indicators_2016_proceedings = data.general_indicators_2016_proceedings
self.general_indicators_2019_proceedings = data.general_indicators_2019_proceedings
self.egress_list = data.egress_list
self.students_list = data.students_list
self.exceptions = data.exceptions
self.journals_upperstrata_2019 = data.journals_upperstrata_2019
self.journals_upperstrata_SE_2019 = data.journals_upperstrata_SE_2019
self.journals_upperstrata_2016 = data.journals_upperstrata_2016
self.journals_upperstrata_SE_2016 = data.journals_upperstrata_SE_2016
self.journals = data.journals
self.proceedings = data.proceedings
self.journal_metrics_2019 = data.journal_metrics_2019
self.journal_metrics_2016 = data.journal_metrics_2016
self.proceedings_metrics_2019 = data.proceedings_metrics_2019
self.proceedings_metrics_2016 = data.proceedings_metrics_2016
for pos, egress in enumerate(self.egress_list):
self.egress_list[pos].name = treat_exceptions(egress.name.strip())
for pos, student in enumerate(self.students_list):
self.students_list[pos].name = treat_exceptions(student.name.strip())
self.add_info()
# self.altera_authors()
self.apply_style()
# self.converte_valores()
self.apply_dimensions()
self.apply_colors()
self.apply_filters()
def styled_cells(self, data, ws, paint=True, qualis_year=None): # Apply a specific style to the cell
for c in data:
c = Cell(ws, column="A", row=1, value=c)
if c.value != None and str(c.value) != "nan":
if c.value == "Porcentagem alunos/egressos":
c.value = "Porcentagem" # Change the name of the cell
if data[0] in ["Periódicos", "A1-A4", "A1", "A2", "A3", "A4", "Irestrito", "Irestrito Periódicos", "Irestrito Anais"]: # Check the cell column
c.font = Font(color='FFFAFA') # Change the color of the text (to white)
c.fill = PatternFill(fill_type='solid', start_color='00B050', end_color='00B050') # Change the background color of the cell (to green)
elif qualis_year == "2016" and data[0] in ["A1-B1", "B1"]:
c.font = Font(color='FFFAFA') # Change the color of the text (to white)
c.fill = PatternFill(fill_type='solid', start_color='00B050', end_color='00B050') # Change the background color of the cell (to green)
elif data[0] != "Outros" and data[0] != "Número médio de docentes" and paint == True: # Check the cell column
c.fill = PatternFill(fill_type='solid', start_color='FFFFCC', end_color='FFFFCC') # Change the background color of the cell (similar to yellow)
if c.value in ["Tipo/Qualis CC 2016", "Tipo/Qualis 2019", "Quantidade", "Porcentagem", "Quantidade com alunos/egressos", "% Alunos/Egressos", "Índice", "Acumulado", "Média por docente", "Número médio de docentes", "Nome Periódico", "Qualis 2019/ISSN Impresso", "Qualis 2019/ISSN Online", "Métrica CC 2016", "Métrica 2019", "Qtd.", "Qtd. %", "Periódicos e Anais - Qualis 2016", "Periódicos e Anais - Qualis 2019", "Periódicos - Qualis 2016", "Periódicos - Qualis 2019", "Anais - Qualis 2016", "Anais - Qualis 2019"]:
c.font = Font(bold=True) # Set the text bold
if c.value != "Número médio de docentes" and c.value != "Periódicos e Anais - Qualis 2016" and c.value != "Periódicos e Anais - Qualis 2019":
bd = Side(border_style="thin", color="000000") # Black border
c.border = Border(left=bd, top=bd, right=bd, bottom=bd) # Set the border
c.alignment = Alignment(horizontal='center', vertical='center') # Center alignment
yield c
def add_parameters_sheet(self):
ws = self.active # First sheet
ws.title = 'Parâmetros'
ws.append(["Estrato", "Peso"]) # Add two columns
# Columns values:
strata_list = ["A1", "A2", "A3", "A4", "B1", "B2", "B3", "B4", ]
weights = [1.000, 0.875, 0.750, 0.625, 0.500, 0.200, 0.100, 0.050, ]
for pos, strata in enumerate(strata_list):
ws.append([strata, weights[pos]]) # Add the values to the columns
ws.append([None, None]) # Blank line
ws.append(self.styled_cells(["Número médio de docentes"], ws)) # Add ND
ws.append(["ND", ND])
def add_indicators_sheet(self):
ws = self.create_sheet("Indicadores")
ws.append(["Qualis 2013-2016"])
ws.append(["Irestrito", "Acumulado", "Médio", None, "Igeral", "Acumulado", "Médio"]) # Add two columns
for pos, item in enumerate(self.irestritos_2016.items()):
item = list(item)
mean = round(item[1]/ND, 2)
mean = str(mean)
mean = mean.replace('.', ',')
item.append(mean)
item.append(None)
item.append(list(self.igerais_2016.items())[pos][0])
item.append(list(self.igerais_2016.items())[pos][1])
mean_2 = float(str(list(self.igerais_2016.items())[pos][1]).replace(',', '.'))
mean_2 = round(mean_2/ND, 2)
mean_2 = str(mean_2).replace('.', ',')
item.append(mean_2)
ws.append(item)
ws.append([None, None, None, None, None, None, None])
ws.append(["Qualis 2017-2020"])
ws.append(["Irestrito", "Acumulado", "Médio", None, "Igeral", "Acumulado", "Médio"]) # Add two columns
for pos, item in enumerate(self.irestritos_2019.items()):
item = list(item)
mean = round(item[1]/ND, 2)
mean = str(mean)
mean = mean.replace('.', ',')
item.append(mean)
item.append(None)
item.append(list(self.igerais_2019.items())[pos][0])
item.append(list(self.igerais_2019.items())[pos][1])
mean_2 = float(str(list(self.igerais_2019.items())[pos][1]).replace(',', '.'))
mean_2 = round(mean_2/ND, 2)
mean_2 = str(mean_2).replace('.', ',')
item.append(mean_2)
ws.append(item)
def add_authors_sheet(self):
ws = self.create_sheet("Autores") # Sheet with a list of authors
self.authors = self.authors.rename(columns={"Author": "Autor"})
for row in dataframe_to_rows(self.authors, index=False, header=True): # Add the dataframe rows to the sheet
ws.append(row)
def get_authors_list(self): # Get a list with the authors names
authors_list = []
for pos, author in enumerate(self.reports["Author"]):
if FILE == "EGRESSOS 2017-2020":
first_name = author.split(" ")[0]
second_name = author.split(" ")[1]
if len(second_name) < 3:
second_name = second_name + " " + author.split(" ")[2]
authors_list.append(first_name + " " + second_name)
else:
if author.split(" ")[0] not in authors_list: # Add the author's first name to the list if it's not already in there
authors_list.append(author.split(" ")[0])
else:
# If there's already an author with this name, both of them will also carry their second name
found = False
for author2 in self.reports["Author"]:
if author2.split(" ")[0] == author.split(" ")[0] and found == False:
found = True
for pos, autor3 in enumerate(authors_list):
if autor3 == author2.split(" ")[0]:
authors_list[pos] = f"{author2.split(' ')[0]} {author2.split(' ')[1]}"
authors_list.append(f"{author.split(' ')[0]} {author.split(' ')[1]}")
return authors_list
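# Illustrative note (hypothetical names, not from the original source): with two report authors
# such as "Maria Silva ..." and "Maria Souza ...", the loop above replaces the bare "Maria" entry
# with "Maria Silva" and then appends "Maria Souza", so ambiguous first names end up carrying
# their second name as well.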
def add_graphs_sheet(self, authors_list):
authors_indicators_2016_copy = []
for table in self.authors_indicators_2016:
authors_indicators_2016_copy.append(table.copy()) # Creates a copy of each table and add to the indicators_copy list
for pos, table in enumerate(authors_indicators_2016_copy):
name_list = []
for i in range(len(table.index)):
name_list.append(authors_list[pos])
table["Nome Autor"] = name_list # Add a list of authors names to the table
authors_indicators_2016_copy[pos] = table
authors_indicators_2016_copy = pd.concat(authors_indicators_2016_copy, ignore_index=True, sort=False)
authors_indicators_2019_copy = []
for table in self.authors_indicators_2019:
authors_indicators_2019_copy.append(table.copy()) # Creates a copy of each table and add to the indicators_copy list
for pos, table in enumerate(authors_indicators_2019_copy):
name_list = []
for i in range(len(table.index)):
name_list.append(authors_list[pos])
table["Nome Autor"] = name_list # Add a list of authors names to the table
authors_indicators_2019_copy[pos] = table
authors_indicators_2019_copy = pd.concat(authors_indicators_2019_copy, ignore_index=True, sort=False)
ws = self.create_sheet("Gráficos Q2016") # Creates the graphs sheet
graphs_2016 = Graphs(authors_indicators_2016_copy, self.journals_upperstrata_2016, self.journals_upperstrata_SE_2016, authors_list, "CC 2016", "temp") # Generates the graphs
ws = graphs_2016.add_graphs(ws) # Add the graphs
ws = self.create_sheet("Gráficos Q2019") # Creates the graphs sheet
graphs_2019 = Graphs(authors_indicators_2019_copy, self.journals_upperstrata_2019, self.journals_upperstrata_SE_2019, authors_list, "2019", "temp2") # Generates the graphs
ws = graphs_2019.add_graphs(ws) # Add the graphs
def build_general_summary(self, ws, authors_list, qualis_year):
ws.append(self.styled_cells([f"Periódicos e Anais - Qualis {qualis_year}", None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None], ws, paint=False))
ws.merge_cells('A1:D1')
if qualis_year == "2016":
indicators = self.authors_indicators_2016
summary = pd.DataFrame(columns=["Nome", "Autores/Artigo", "Irestrito", "Igeral", "Periódicos", "Anais", "A1-B1", "A1", "A2", "B1", "B2-B5", "B2", "B3", "B4", "B5", "Outros", "Periódicos A/E", "Anais A/E", "A1-B1 A/E", "A1 A/E", "A2 A/E", "B1 A/E", "B2-B5 A/E", "B2 A/E", "B3 A/E", "B4 A/E", "B5 A/E", "Outros A/E", "Periódicos A1-B1", "Periódicos A1-B1 com alunos/egressos"])
positions = {"Irestrito": 14, "Igeral": 15, "Periódicos": 0, "Anais": 1, "A1-B1": 2, "A1": 3, "A2": 4, "B1":5, "B2-B5": 6, "B2": 7, "B3": 8, "B4": 9, "B5": 10, "Outros": 11}
elif qualis_year == "2019":
indicators = self.authors_indicators_2019
summary = pd.DataFrame(columns=["Nome", "Autores/Artigo", "Irestrito", "Igeral", "Periódicos", "Anais", "A1-A4", "A1", "A2", "A3", "A4", "B1-B4", "B1", "B2", "B3", "B4", "Outros", "Periódicos A/E", "Anais A/E", "A1-A4 A/E", "A1 A/E", "A2 A/E", "A3 A/E", "A4 A/E", "B1-B4 A/E", "B1 A/E", "B2 A/E", "B3 A/E", "B4 A/E", "Outros A/E", "Periódicos A1-A4", "Periódicos A1-A4 com alunos/egressos"])
positions = {"Irestrito": 15, "Igeral": 16, "Periódicos": 0, "Anais": 1, "A1-A4": 2, "A1": 3, "A2": 4, "A3": 5, "A4": 6, "B1-B4": 7, "B1": 8, "B2": 9, "B3": 10, "B4": 11, "Outros": 12}
for pos, table in enumerate(indicators):
row = []
row.append(authors_list[pos])
try:
average = str(self.averages[pos]).replace("Média de autores/artigo = ", "")
row.append(float(average))
except:
row.append("")
for key in positions.keys():
row.append(table["Quantidade"][positions[key]])
for key in positions.keys():
if key != "Irestrito" and key != "Igeral":
try:
row.append(int(table["Quantidade com alunos/egressos"][positions[key]]))
except:
row.append(float(table["Quantidade com alunos/egressos"][positions[key]]))
if qualis_year == "2016":
row.append(self.journals_upperstrata_2016[pos])
row.append(self.journals_upperstrata_SE_2016[pos])
elif qualis_year == "2019":
row.append(self.journals_upperstrata_2019[pos])
row.append(self.journals_upperstrata_SE_2019[pos])
summary.loc[len(summary)] = row
row1 = []
row2 = []
for column in summary.columns:
total_ppg = 0
if column != "Autores/Artigo" and column != "Nome" and column != "Irestrito" and column != "Igeral":
for data in summary[column]:
total_ppg += data
doc_ppg = total_ppg/ND
total_ppg = round(total_ppg, 1)
doc_ppg = round(doc_ppg, 1)
elif column == "Nome":
total_ppg = "PPGtotal"
doc_ppg = "PPGdoc"
elif column == "Irestrito":
if qualis_year == "2016":
total_ppg = self.irestritos_2016['Total sem trava']
elif qualis_year == "2019":
total_ppg = self.irestritos_2019['Total sem trava']
doc_ppg = total_ppg/ND
total_ppg = round(total_ppg, 1)
doc_ppg = round(doc_ppg, 1)
elif column == "Igeral":
if qualis_year == "2016":
total_ppg = self.igerais_2016['Total sem trava']
elif qualis_year == "2019":
total_ppg = self.igerais_2019['Total sem trava']
doc_ppg = total_ppg/ND
total_ppg = round(total_ppg, 1)
doc_ppg = round(doc_ppg, 1)
else:
total_ppg = "-"
doc_ppg = "-"
row1.append(total_ppg)
row2.append(doc_ppg)
summary.loc[len(summary)] = row1
summary.loc[len(summary)] = row2
ws.merge_cells('B2:D2')
ws["A2"] = " "
ws["B2"] = "Índices"
if qualis_year == "2016":
ws.merge_cells('E2:P2')
ws["E2"] = "Publicações totais"
ws.merge_cells('Q2:AB2')
ws["Q2"] = "Publicações com alunos/egressos"
ws.merge_cells('AC2:AD2')
ws["AC2"] = " "
else:
ws.merge_cells('E2:Q2')
ws["E2"] = "Publicações totais"
ws.merge_cells('R2:AD2')
ws["R2"] = "Publicações com alunos/egressos"
ws.merge_cells('AE2:AF2')
ws["AE2"] = " "
bd = Side(border_style="thin", color="000000") # Black border
row = list(ws.rows)[1]
for pos, cell in enumerate(row):
cell.border = Border(left=bd, top=bd, right=bd, bottom=bd) # Set the border
summary = pd.DataFrame(summary)
rows_count = 2 # The title + the first row
for row in dataframe_to_rows(summary, index=False, header=True):
ws.append(row)
rows_count += 1
return (ws, rows_count)
def build_separated_summary(self, ws, authors_list, qualis_year, rows_count, indicators, pub_type):
indicators = indicators
if qualis_year == "2016":
summary = pd.DataFrame(columns=["Nome", "Autores/Artigo", "Irestrito", "Igeral", "A1-B1", "A1", "A2", "B1", "B2-B5", "B2", "B3", "B4", "B5", "Outros", "A1-B1 A/E", "A1 A/E", "A2 A/E", "B1 A/E", "B2-B5 A/E", "B2 A/E", "B3 A/E", "B4 A/E", "B5 A/E", "Outros A/E"])
positions = {"Irestrito": 12, "Igeral": 13, "A1-B1": 0, "A1": 1, "A2": 2, "B1":3, "B2-B5": 4, "B2": 5, "B3": 6, "B4": 7, "B5": 8, "Outros": 9}
elif qualis_year == "2019":
summary = pd.DataFrame(columns=["Nome", "Autores/Artigo", "Irestrito", "Igeral", "A1-A4", "A1", "A2", "A3", "A4", "B1-B4", "B1", "B2", "B3", "B4", "Outros", "A1-A4 A/E", "A1 A/E", "A2 A/E", "A3 A/E", "A4 A/E", "B1-B4 A/E", "B1 A/E", "B2 A/E", "B3 A/E", "B4 A/E", "Outros A/E"])
positions = {"Irestrito": 13, "Igeral": 14, "A1-A4": 0, "A1": 1, "A2": 2, "A3": 3, "A4": 4, "B1-B4": 5, "B1": 6, "B2": 7, "B3": 8, "B4": 9, "Outros": 10}
for pos, table in enumerate(indicators):
row = []
row.append(authors_list[pos])
try:
average = str(self.averages[pos]).replace("Média de autores/artigo = ", "")
row.append(float(average))
except:
row.append("")
for key in positions.keys():
row.append(table["Quantidade"][positions[key]])
for key in positions.keys():
if key != "Irestrito" and key != "Igeral":
try:
row.append(int(table["Quantidade com alunos/egressos"][positions[key]]))
except:
row.append(float(table["Quantidade com alunos/egressos"][positions[key]]))
summary.loc[len(summary)] = row
row1 = []
row2 = []
for column in summary.columns:
total_ppg = 0
if column != "Autores/Artigo" and column != "Nome" and column != "Irestrito" and column != "Igeral":
for data in summary[column]:
total_ppg += data
doc_ppg = total_ppg/ND
total_ppg = round(total_ppg, 1)
doc_ppg = round(doc_ppg, 1)
elif column == "Nome":
total_ppg = "PPGtotal"
doc_ppg = "PPGdoc"
elif column == "Irestrito":
if qualis_year == "2016":
if pub_type == "journals":
total_ppg = self.irestritos_2016['Periódicos']
elif pub_type == "proceedings":
total_ppg = self.irestritos_2016['Anais sem trava']
elif qualis_year == "2019":
if pub_type == "journals":
total_ppg = self.irestritos_2019['Periódicos']
elif pub_type == "proceedings":
total_ppg = self.irestritos_2019['Anais sem trava']
doc_ppg = total_ppg/ND
total_ppg = round(total_ppg, 1)
doc_ppg = round(doc_ppg, 1)
elif column == "Igeral":
if qualis_year == "2016":
if pub_type == "journals":
total_ppg = self.igerais_2016['Periódicos']
elif pub_type == "proceedings":
total_ppg = self.igerais_2016['Anais sem trava']
elif qualis_year == "2019":
if pub_type == "journals":
total_ppg = self.igerais_2019['Periódicos']
elif pub_type == "proceedings":
total_ppg = self.igerais_2019['Anais sem trava']
doc_ppg = total_ppg/ND
total_ppg = round(total_ppg, 1)
doc_ppg = round(doc_ppg, 1)
else:
total_ppg = "-"
doc_ppg = "-"
row1.append(total_ppg)
row2.append(doc_ppg)
summary.loc[len(summary)] = row1
summary.loc[len(summary)] = row2
ws.merge_cells(f'B{rows_count+1}:D{rows_count+1}')
ws[f"A{rows_count+1}"] = " "
ws[f"B{rows_count+1}"] = "Índices"
if qualis_year == "2016":
ws.merge_cells(f'E{rows_count+1}:N{rows_count+1}')
ws[f"E{rows_count+1}"] = "Publicações totais"
ws.merge_cells(f'O{rows_count+1}:X{rows_count+1}')
ws[f"O{rows_count+1}"] = "Publicações com alunos/egressos"
bd = Side(border_style="thin", color="000000") # Black border
row = list(ws.rows)[rows_count]
for pos, cell in enumerate(row):
if pos < 24:
cell.border = Border(left=bd, top=bd, right=bd, bottom=bd) # Set the border
else:
ws.merge_cells(f'E{rows_count+1}:O{rows_count+1}')
ws[f"E{rows_count+1}"] = "Publicações totais"
ws.merge_cells(f'P{rows_count+1}:Z{rows_count+1}')
ws[f"P{rows_count+1}"] = "Publicações com alunos/egressos"
bd = Side(border_style="thin", color="000000") # Black border
row = list(ws.rows)[rows_count]
for pos, cell in enumerate(row):
if pos < 26:
cell.border = Border(left=bd, top=bd, right=bd, bottom=bd) # Set the border
summary = pd.DataFrame(summary)
rows_count += 1 # The first row
for row in dataframe_to_rows(summary, index=False, header=True):
ws.append(row)
rows_count += 1
return (ws, rows_count)
def add_summary_sheet(self, authors_list, qualis_year):
ws = self.create_sheet(f"Resumo Q{qualis_year}") # Creates the summary sheet
ws, rows_count = self.build_general_summary(ws, authors_list, qualis_year)
self.summary_size = rows_count
ws.append([None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None])
ws.append([None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None])
ws.append(self.styled_cells([f"Periódicos - Qualis {qualis_year}", None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None], ws, paint=False))
ws.merge_cells(f'A{rows_count+3}:D{rows_count+3}')
bd = Side(border_style="thin", color="ffffff")
ws[f"A{rows_count+2}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
ws[f"A{rows_count+3}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
ws[f"B{rows_count+3}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
ws[f"C{rows_count+3}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
ws[f"D{rows_count+3}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
rows_count += 3
if qualis_year == "2016":
indicators = self.authors_indicators_2016_journals
else:
indicators = self.authors_indicators_2019_journals
ws, rows_count = self.build_separated_summary(ws, authors_list, qualis_year, rows_count, indicators, pub_type="journals")
ws.append([None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None])
ws.append([None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None])
ws.append(self.styled_cells([f"Anais - Qualis {qualis_year}", None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None], ws, paint=False))
ws.merge_cells(f'A{rows_count+3}:D{rows_count+3}')
bd = Side(border_style="thin", color="ffffff")
ws[f"A{rows_count+3}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
ws[f"B{rows_count+3}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
ws[f"C{rows_count+3}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
ws[f"D{rows_count+3}"].border = Border(left=bd, top=bd, right=bd, bottom=bd)
rows_count += 3
if qualis_year == "2016":
indicators = self.authors_indicators_2016_proceedings
else:
indicators = self.authors_indicators_2019_proceedings
ws, rows_count = self.build_separated_summary(ws, authors_list, qualis_year, rows_count, indicators, pub_type="proceedings")
def add_artprof_sheet(self):
ws = self.create_sheet("Art|Prof")
for row in dataframe_to_rows(self.art_prof, index=False, header=True):
ws.append(row)
def add_artppg_sheet(self):
ws = self.create_sheet("Art|PPG")
for row in dataframe_to_rows(self.artppg, index=False, header=True):
ws.append(row)
# for col in ws.columns:
# if col[0].column_letter == 'I':
# for cell in col:
# if cell.value != " ":
# cell.number_format = "$"
ws.append([""])
for row in dataframe_to_rows(self.general_indicators_2016, index=False, header=True):
ws.append(self.styled_cells(row, ws, qualis_year="2016"))
ws.append([""])
for row in dataframe_to_rows(self.general_indicators_2019, index=False, header=True):
ws.append(self.styled_cells(row, ws, qualis_year="2019"))
ws.append([None, None, None])
ws.append([None, None, None])
ws.append([None, None, None])
def add_proceedingsppg_sheet(self):
ws = self.create_sheet("Anais|PPG")
df = pd.DataFrame()
df["Nome de Publicação"] = self.proceedings["Nome de Publicação"]
df["Sigla"] = self.proceedings["SIGLA"]
df["Qualis CC 2016"] = self.proceedings["Qualis CC 2016"]
df["Qualis 2019"] = self.proceedings["Qualis 2019"]
df["Quantidade"] = self.proceedings["Quantidade"]
sum_ = 0
for i in df["Quantidade"]:
sum_ += i
percentages = []
for i in df["Quantidade"]:
percentages.append(f"{round(100/sum_ * i, 1)}%")
df["Porcentagem"] = percentages
for row in dataframe_to_rows(df, index=False, header=True):
ws.append(row)
ws.append([None])
for row in dataframe_to_rows(self.proceedings_metrics_2016, index=False, header=True):
ws.append(self.styled_cells(row, ws, paint=False))
ws.append([None])
for row in dataframe_to_rows(self.proceedings_metrics_2019, index=False, header=True):
ws.append(self.styled_cells(row, ws, paint=False))
graphs = Graphs_Proceedings_Journals(df.copy(), "Anais de Eventos Utilizados para Publicação")
ws = graphs.add_graphs(ws)
def add_journalsppg_sheet(self):
ws = self.create_sheet("Periódicos|PPG")
df = | pd.DataFrame() | pandas.DataFrame |
"""Test log.py"""
from typing import Any
import pandas as pd # type: ignore
import pytest
from leabra7 import events
from leabra7 import log
from leabra7 import specs
# Test log.DataFrameBuffer
def test_dataframebuffer_can_record_observations() -> None:
dfb = log.DataFrameBuffer()
dfb.append(pd.DataFrame({"unit": [0, 1], "act": [0.5, 0.3]}))
dfb.append( | pd.DataFrame({"unit": [0, 1], "act": [0.6, 0.7]}) | pandas.DataFrame |
import sys
import dask
import dask.dataframe as dd
from distributed import Executor
from distributed.utils_test import cluster, loop, gen_cluster
from distributed.collections import (_futures_to_dask_dataframe,
futures_to_dask_dataframe, _futures_to_dask_array,
futures_to_dask_array, _stack, stack)
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from toolz import identity
from tornado import gen
from tornado.ioloop import IOLoop
dfs = [pd.DataFrame({'x': [1, 2, 3]}, index=[0, 10, 20]),
pd.DataFrame({'x': [4, 5, 6]}, index=[30, 40, 50]),
pd.DataFrame({'x': [7, 8, 9]}, index=[60, 70, 80])]
def assert_equal(a, b):
assert type(a) == type(b)
if isinstance(a, pd.DataFrame):
tm.assert_frame_equal(a, b)
elif isinstance(a, pd.Series):
| tm.assert_series_equal(a, b) | pandas.util.testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 9 16:22:47 2020
@author: mrizwan
"""
import pandas as pd
# Initialize dataframes
df1 = pd.read_csv('one.csv')
print(df1)
'''
year name id education
0 2010 andy 101 bachelor
1 2012 peter 102 master
2 2009 mark 103 school
'''
df2 = | pd.read_csv('two.csv') | pandas.read_csv |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet as fp
except ImportError: # pragma: no cover
fp = None
from .... import dataframe as md
from .... import tensor as mt
from ...datasource.read_csv import DataFrameReadCSV
from ...datasource.read_sql import DataFrameReadSQL
from ...datasource.read_parquet import DataFrameReadParquet
@pytest.mark.parametrize('chunk_size', [2, (2, 3)])
def test_set_index(setup, chunk_size):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=chunk_size)
expected = df1.set_index('y', drop=True)
df3 = df2.set_index('y', drop=True)
pd.testing.assert_frame_equal(
expected, df3.execute().fetch())
expected = df1.set_index('y', drop=False)
df4 = df2.set_index('y', drop=False)
pd.testing.assert_frame_equal(
expected, df4.execute().fetch())
expected = df1.set_index('y')
df2.set_index('y', inplace=True)
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
def test_iloc_getitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1.iloc[1]
df3 = df2.iloc[1]
result = df3.execute(extra_config={'check_series_name': False}).fetch()
pd.testing.assert_series_equal(
expected, result)
# plain index on axis 1
expected = df1.iloc[:2, 1]
df4 = df2.iloc[:2, 1]
pd.testing.assert_series_equal(
expected, df4.execute().fetch())
# slice index
expected = df1.iloc[:, 2:4]
df5 = df2.iloc[:, 2:4]
pd.testing.assert_frame_equal(
expected, df5.execute().fetch())
# plain fancy index
expected = df1.iloc[[0], [0, 1, 2]]
df6 = df2.iloc[[0], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df6.execute().fetch())
# plain fancy index with shuffled order
expected = df1.iloc[[0], [1, 2, 0]]
df7 = df2.iloc[[0], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df7.execute().fetch())
# fancy index
expected = df1.iloc[[1, 2], [0, 1, 2]]
df8 = df2.iloc[[1, 2], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df8.execute().fetch())
# fancy index with shuffled order
expected = df1.iloc[[2, 1], [1, 2, 0]]
df9 = df2.iloc[[2, 1], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df9.execute().fetch())
# one fancy index
expected = df1.iloc[[2, 1]]
df10 = df2.iloc[[2, 1]]
pd.testing.assert_frame_equal(
expected, df10.execute().fetch())
# plain index
expected = df1.iloc[1, 2]
df11 = df2.iloc[1, 2]
assert expected == df11.execute().fetch()
# bool index array
expected = df1.iloc[[True, False, True], [2, 1]]
df12 = df2.iloc[[True, False, True], [2, 1]]
pd.testing.assert_frame_equal(
expected, df12.execute().fetch())
# bool index array on axis 1
expected = df1.iloc[[2, 1], [True, False, True]]
df14 = df2.iloc[[2, 1], [True, False, True]]
pd.testing.assert_frame_equal(
expected, df14.execute().fetch())
# bool index
expected = df1.iloc[[True, False, True], [2, 1]]
df13 = df2.iloc[md.Series([True, False, True], chunk_size=1), [2, 1]]
pd.testing.assert_frame_equal(
expected, df13.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3).iloc[:3]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[:3])
series = md.Series(data, chunk_size=3).iloc[4]
assert series.execute().fetch() == data.iloc[4]
series = md.Series(data, chunk_size=3).iloc[[2, 3, 4, 9]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[2, 3, 4, 9]])
series = md.Series(data, chunk_size=3).iloc[[4, 3, 9, 2]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[4, 3, 9, 2]])
series = md.Series(data).iloc[5:]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
series = md.Series(data).iloc[selection]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# bool index
series = md.Series(data).iloc[md.Series(selection, chunk_size=4)]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# test index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)[:3]
pd.testing.assert_index_equal(
index.execute().fetch(), data[:3])
index = md.Index(data, chunk_size=3)[4]
assert index.execute().fetch() == data[4]
index = md.Index(data, chunk_size=3)[[2, 3, 4, 9]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[2, 3, 4, 9]])
index = md.Index(data, chunk_size=3)[[4, 3, 9, 2]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[4, 3, 9, 2]])
index = md.Index(data)[5:]
pd.testing.assert_index_equal(
index.execute().fetch(), data[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
index = md.Index(data)[selection]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
index = md.Index(data)[mt.tensor(selection, chunk_size=4)]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
def test_iloc_setitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1
expected.iloc[1] = 100
df2.iloc[1] = 100
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# slice index
expected.iloc[:, 2:4] = 1111
df2.iloc[:, 2:4] = 1111
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain fancy index
expected.iloc[[0], [0, 1, 2]] = 2222
df2.iloc[[0], [0, 1, 2]] = 2222
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# fancy index
expected.iloc[[1, 2], [0, 1, 2]] = 3333
df2.iloc[[1, 2], [0, 1, 2]] = 3333
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain index
expected.iloc[1, 2] = 4444
df2.iloc[1, 2] = 4444
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3)
series.iloc[:3] = 1
data.iloc[:3] = 1
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[4] = 2
data.iloc[4] = 2
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[[2, 3, 4, 9]] = 3
data.iloc[[2, 3, 4, 9]] = 3
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[5:] = 4
data.iloc[5:] = 4
pd.testing.assert_series_equal(
series.execute().fetch(), data)
# test Index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)
with pytest.raises(TypeError):
index[5:] = 4
def test_loc_getitem(setup):
rs = np.random.RandomState(0)
# index and columns are labels
raw1 = pd.DataFrame(rs.randint(10, size=(5, 4)),
index=['a1', 'a2', 'a3', 'a4', 'a5'],
columns=['a', 'b', 'c', 'd'])
# columns are labels
raw2 = raw1.copy()
raw2.reset_index(inplace=True, drop=True)
# columns are non unique and monotonic
raw3 = raw1.copy()
raw3.columns = ['a', 'b', 'b', 'd']
# columns are non unique and non monotonic
raw4 = raw1.copy()
raw4.columns = ['b', 'a', 'b', 'd']
# index that is timestamp
raw5 = raw1.copy()
raw5.index = pd.date_range('2020-1-1', periods=5)
raw6 = raw1[:0]
df1 = md.DataFrame(raw1, chunk_size=2)
df2 = md.DataFrame(raw2, chunk_size=2)
df3 = md.DataFrame(raw3, chunk_size=2)
df4 = md.DataFrame(raw4, chunk_size=2)
df5 = md.DataFrame(raw5, chunk_size=2)
df6 = md.DataFrame(raw6)
df = df2.loc[3, 'b']
result = df.execute().fetch()
expected = raw2.loc[3, 'b']
assert result == expected
df = df1.loc['a3', 'b']
result = df.execute(extra_config={'check_shape': False}).fetch()
expected = raw1.loc['a3', 'b']
assert result == expected
# test empty list
df = df1.loc[[]]
result = df.execute().fetch()
expected = raw1.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[[]]
result = df.execute().fetch()
expected = raw2.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[1:4, 'b':'d']
result = df.execute().fetch()
expected = raw2.loc[1:4, 'b': 'd']
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:4, 'b':]
result = df.execute().fetch()
expected = raw2.loc[:4, 'b':]
pd.testing.assert_frame_equal(result, expected)
# slice on axis index whose index_value does not have value
df = df1.loc['a2':'a4', 'b':]
result = df.execute().fetch()
expected = raw1.loc['a2':'a4', 'b':]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:, 'b']
result = df.execute().fetch()
expected = raw2.loc[:, 'b']
pd.testing.assert_series_equal(result, expected)
# 'b' is non-unique
df = df3.loc[:, 'b']
result = df.execute().fetch()
expected = raw3.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# 'b' is non-unique, and non-monotonic
df = df4.loc[:, 'b']
result = df.execute().fetch()
expected = raw4.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# label on axis 0
df = df1.loc['a2', :]
result = df.execute().fetch()
expected = raw1.loc['a2', :]
pd.testing.assert_series_equal(result, expected)
# label-based fancy index
df = df2.loc[[3, 0, 1], ['c', 'a', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[3, 0, 1], ['c', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index, asc sorted
df = df2.loc[[0, 1, 3], ['a', 'c', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[0, 1, 3], ['a', 'c', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index in which non-unique exists
selection = rs.randint(2, size=(5,), dtype=bool)
df = df3.loc[selection, ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
df = df3.loc[md.Series(selection), ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index on index
# whose index_value does not have value
df = df1.loc[['a3', 'a1'], ['b', 'a', 'd']]
result = df.execute(extra_config={'check_nsplits': False}).fetch()
expected = raw1.loc[['a3', 'a1'], ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# get timestamp by str
df = df5.loc['20200101']
result = df.execute(extra_config={'check_series_name': False}).fetch(
extra_config={'check_series_name': False})
expected = raw5.loc['20200101']
pd.testing.assert_series_equal(result, expected)
# get timestamp by str, return scalar
df = df5.loc['2020-1-1', 'c']
result = df.execute().fetch()
expected = raw5.loc['2020-1-1', 'c']
assert result == expected
# test empty df
df = df6.loc[[]]
result = df.execute().fetch()
expected = raw6.loc[[]]
pd.testing.assert_frame_equal(result, expected)
def test_dataframe_getitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
data2 = data.copy()
data2.index = pd.date_range('2020-1-1', periods=10)
mdf = md.DataFrame(data2, chunk_size=3)
series1 = df['c2']
pd.testing.assert_series_equal(
series1.execute().fetch(), data['c2'])
series2 = df['c5']
pd.testing.assert_series_equal(
series2.execute().fetch(), data['c5'])
df1 = df[['c1', 'c2', 'c3']]
pd.testing.assert_frame_equal(
df1.execute().fetch(), data[['c1', 'c2', 'c3']])
df2 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df2.execute().fetch(), data[['c3', 'c2', 'c1']])
df3 = df[['c1']]
pd.testing.assert_frame_equal(
df3.execute().fetch(), data[['c1']])
df4 = df[['c3', 'c1', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df4.execute().fetch(), data[['c3', 'c1', 'c2', 'c1']])
df5 = df[np.array(['c1', 'c2', 'c3'])]
pd.testing.assert_frame_equal(
df5.execute().fetch(), data[['c1', 'c2', 'c3']])
df6 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df6.execute().fetch(), data[['c3', 'c2', 'c1']])
df7 = df[1:7:2]
pd.testing.assert_frame_equal(
df7.execute().fetch(), data[1:7:2])
series3 = df['c1'][0]
assert series3.execute().fetch() == data['c1'][0]
df8 = mdf[3:7]
pd.testing.assert_frame_equal(
df8.execute().fetch(), data2[3:7])
df9 = mdf['2020-1-2': '2020-1-5']
pd.testing.assert_frame_equal(
df9.execute().fetch(), data2['2020-1-2': '2020-1-5'])
def test_dataframe_getitem_bool(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
mask_data = data.c1 > 0.5
mask = md.Series(mask_data, chunk_size=2)
# getitem by mars series
assert df[mask].execute().fetch().shape == data[mask_data].shape
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by pandas series
pd.testing.assert_frame_equal(
df[mask_data].execute().fetch(), data[mask_data])
# getitem by mars series with alignment but no shuffle
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=range(9, -1, -1))
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by mars series with shuffle alignment
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by mars series with shuffle alignment and extra element
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True, False],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4, 10])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by DataFrame with all bool columns
r = df[df > 0.5]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data > 0.5])
# getitem by tensor mask
r = df[(df['c1'] > 0.5).to_tensor()]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data['c1'] > 0.5])
def test_dataframe_getitem_using_attr(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'key', 'dtypes', 'size'])
df = md.DataFrame(data, chunk_size=2)
series1 = df.c2
pd.testing.assert_series_equal(
series1.execute().fetch(), data.c2)
# accessing column using attribute shouldn't overwrite existing attributes
assert df.key == getattr(getattr(df, '_data'), '_key')
assert df.size == data.size
pd.testing.assert_series_equal(df.dtypes, data.dtypes)
# accessing non-existing attributes should trigger exception
with pytest.raises(AttributeError):
_ = df.zzz # noqa: F841
def test_series_getitem(setup):
data = pd.Series(np.random.rand(10))
series = md.Series(data)
assert series[1].execute().fetch() == data[1]
data = pd.Series(np.random.rand(10), name='a')
series = md.Series(data, chunk_size=4)
for i in range(10):
series1 = series[i]
assert series1.execute().fetch() == data[i]
series2 = series[[0, 1, 2, 3, 4]]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[[0, 1, 2, 3, 4]])
series3 = series[[4, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[[4, 3, 2, 1, 0]])
series4 = series[[1, 2, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[[1, 2, 3, 2, 1, 0]])
#
index = ['i' + str(i) for i in range(20)]
data = pd.Series(np.random.rand(20), index=index, name='a')
series = md.Series(data, chunk_size=3)
for idx in index:
series1 = series[idx]
assert series1.execute().fetch() == data[idx]
selected = ['i1', 'i2', 'i3', 'i4', 'i5']
series2 = series[selected]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[selected])
selected = ['i4', 'i7', 'i0', 'i1', 'i5']
series3 = series[selected]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[selected])
selected = ['i0', 'i1', 'i5', 'i4', 'i0', 'i1']
series4 = series[selected]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[selected])
selected = ['i0']
series5 = series[selected]
pd.testing.assert_series_equal(
series5.execute().fetch(), data[selected])
data = pd.Series(np.random.rand(10,))
series = md.Series(data, chunk_size=3)
selected = series[:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:2])
selected = series[2:8:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[2:8:2])
data = pd.Series(np.random.rand(9), index=['c' + str(i) for i in range(9)])
series = md.Series(data, chunk_size=3)
selected = series[:'c2']
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:'c2'])
selected = series['c2':'c9']
pd.testing.assert_series_equal(
selected.execute().fetch(), data['c2':'c9'])
def test_head(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.head().execute().fetch(), data.head())
pd.testing.assert_frame_equal(
df.head(3).execute().fetch(), data.head(3))
pd.testing.assert_frame_equal(
df.head(-3).execute().fetch(), data.head(-3))
pd.testing.assert_frame_equal(
df.head(8).execute().fetch(), data.head(8))
pd.testing.assert_frame_equal(
df.head(-8).execute().fetch(), data.head(-8))
pd.testing.assert_frame_equal(
df.head(13).execute().fetch(), data.head(13))
pd.testing.assert_frame_equal(
df.head(-13).execute().fetch(), data.head(-13))
def test_tail(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.tail().execute().fetch(), data.tail())
pd.testing.assert_frame_equal(
df.tail(3).execute().fetch(), data.tail(3))
pd.testing.assert_frame_equal(
df.tail(-3).execute().fetch(), data.tail(-3))
pd.testing.assert_frame_equal(
df.tail(8).execute().fetch(), data.tail(8))
pd.testing.assert_frame_equal(
df.tail(-8).execute().fetch(), data.tail(-8))
pd.testing.assert_frame_equal(
df.tail(13).execute().fetch(), data.tail(13))
pd.testing.assert_frame_equal(
df.tail(-13).execute().fetch(), data.tail(-13))
def test_at(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
data2 = data.copy()
data2.index = np.arange(10)
df2 = md.DataFrame(data2, chunk_size=3)
with pytest.raises(ValueError):
_ = df.at[['i3, i4'], 'c1']
result = df.at['i3', 'c1'].execute().fetch()
assert result == data.at['i3', 'c1']
result = df['c1'].at['i2'].execute().fetch()
assert result == data['c1'].at['i2']
result = df2.at[3, 'c2'].execute().fetch()
assert result == data2.at[3, 'c2']
result = df2.loc[3].at['c2'].execute().fetch()
assert result == data2.loc[3].at['c2']
def test_iat(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
with pytest.raises(ValueError):
_ = df.iat[[1, 2], 3]
result = df.iat[3, 4].execute().fetch()
assert result == data.iat[3, 4]
result = df.iloc[:, 2].iat[3].execute().fetch()
assert result == data.iloc[:, 2].iat[3]
def test_setitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
data2 = np.random.rand(10)
data3 = np.random.rand(10, 2)
df = md.DataFrame(data, chunk_size=3)
df['c3'] = df['c3'] + 1
df['c10'] = 10
df[4] = mt.tensor(data2, chunk_size=4)
df['d1'] = df['c4'].mean()
df['e1'] = data2 * 2
result = df.execute().fetch()
expected = data.copy()
expected['c3'] = expected['c3'] + 1
expected['c10'] = 10
expected[4] = data2
expected['d1'] = data['c4'].mean()
expected['e1'] = data2 * 2
pd.testing.assert_frame_equal(result, expected)
# test set multiple cols with scalar
df = md.DataFrame(data, chunk_size=3)
df[['c0', 'c2']] = 1
df[['c1', 'c10']] = df['c4'].mean()
df[['c11', 'c12']] = mt.tensor(data3, chunk_size=4)
result = df.execute().fetch()
expected = data.copy()
expected[['c0', 'c2']] = 1
expected[['c1', 'c10']] = expected['c4'].mean()
expected[['c11', 'c12']] = data3
pd.testing.assert_frame_equal(result, expected)
# test set multiple rows
df = md.DataFrame(data, chunk_size=3)
df[['c1', 'c4', 'c10']] = df[['c2', 'c3', 'c4']] * 2
result = df.execute().fetch()
expected = data.copy()
expected[['c1', 'c4', 'c10']] = expected[['c2', 'c3', 'c4']] * 2
pd.testing.assert_frame_equal(result, expected)
# test setitem into empty DataFrame
df = md.DataFrame()
df['a'] = md.Series(np.arange(1, 11), chunk_size=3)
pd.testing.assert_index_equal(df.index_value.to_pandas(),
pd.RangeIndex(10))
result = df.execute().fetch()
expected = pd.DataFrame()
expected['a'] = pd.Series(np.arange(1, 11))
pd.testing.assert_frame_equal(result, expected)
df['b'] = md.Series(np.arange(2, 12), index=pd.RangeIndex(1, 11),
chunk_size=3)
result = df.execute().fetch()
expected['b'] = pd.Series(np.arange(2, 12), index=pd.RangeIndex(1, 11))
pd.testing.assert_frame_equal(result, expected)
def test_reset_index_execution(setup):
data = pd.DataFrame([('bird', 389.0),
('bird', 24.0),
('mammal', 80.5),
('mammal', np.nan)],
index=['falcon', 'parrot', 'lion', 'monkey'],
columns=('class', 'max_speed'))
df = md.DataFrame(data)
df2 = df.reset_index()
result = df2.execute().fetch()
expected = data.reset_index()
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=2)
df2 = df.reset_index()
result = df2.execute().fetch()
expected = data.reset_index()
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=1)
df2 = df.reset_index(drop=True)
result = df2.execute().fetch()
expected = data.reset_index(drop=True)
pd.testing.assert_frame_equal(result, expected)
index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
('bird', 'parrot'),
('mammal', 'lion'),
('mammal', 'monkey')],
names=['class', 'name'])
data = pd.DataFrame([('bird', 389.0),
('bird', 24.0),
('mammal', 80.5),
('mammal', np.nan)],
index=index,
columns=('type', 'max_speed'))
df = md.DataFrame(data, chunk_size=1)
df2 = df.reset_index(level='class')
result = df2.execute().fetch()
expected = data.reset_index(level='class')
pd.testing.assert_frame_equal(result, expected)
columns = pd.MultiIndex.from_tuples([('speed', 'max'), ('species', 'type')])
data.columns = columns
df = md.DataFrame(data, chunk_size=2)
df2 = df.reset_index(level='class', col_level=1, col_fill='species')
result = df2.execute().fetch()
expected = data.reset_index(level='class', col_level=1, col_fill='species')
pd.testing.assert_frame_equal(result, expected)
df = md.DataFrame(data, chunk_size=3)
df.reset_index(level='class', col_level=1, col_fill='species', inplace=True)
result = df.execute().fetch()
expected = data.reset_index(level='class', col_level=1, col_fill='species')
pd.testing.assert_frame_equal(result, expected)
# Test Series
s = pd.Series([1, 2, 3, 4], name='foo',
index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
series = md.Series(s)
s2 = series.reset_index(name='bar')
result = s2.execute().fetch()
expected = s.reset_index(name='bar')
pd.testing.assert_frame_equal(result, expected)
series = md.Series(s, chunk_size=2)
s2 = series.reset_index(drop=True)
result = s2.execute().fetch()
expected = s.reset_index(drop=True)
pd.testing.assert_series_equal(result, expected)
# Test Unknown shape
data1 = pd.DataFrame(np.random.rand(10, 3), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9])
df1 = md.DataFrame(data1, chunk_size=5)
data2 = pd.DataFrame(np.random.rand(10, 3), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3])
df2 = md.DataFrame(data2, chunk_size=6)
df = (df1 + df2).reset_index(incremental_index=True)
result = df.execute().fetch()
pd.testing.assert_index_equal(result.index, pd.RangeIndex(12))
# Inconsistent with Pandas when input dataframe's shape is unknown.
result = result.sort_values(by=result.columns[0])
expected = (data1 + data2).reset_index()
np.testing.assert_array_equal(result.to_numpy(), expected.to_numpy())
data1 = pd.Series(np.random.rand(10,), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9])
series1 = md.Series(data1, chunk_size=3)
data2 = pd.Series(np.random.rand(10,), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3])
series2 = md.Series(data2, chunk_size=3)
df = (series1 + series2).reset_index(incremental_index=True)
result = df.execute().fetch()
pd.testing.assert_index_equal(result.index, pd.RangeIndex(12))
# Inconsistent with Pandas when input dataframe's shape is unknown.
result = result.sort_values(by=result.columns[0])
expected = (data1 + data2).reset_index()
np.testing.assert_array_equal(result.to_numpy(), expected.to_numpy())
series1 = md.Series(data1, chunk_size=3)
series1.reset_index(inplace=True, drop=True)
result = series1.execute().fetch()
pd.testing.assert_index_equal(result.index, pd.RangeIndex(10))
# case from https://github.com/mars-project/mars/issues/1286
data = pd.DataFrame(np.random.rand(10, 3), columns=list('abc'))
df = md.DataFrame(data, chunk_size=3)
r = df.sort_values('a').reset_index(drop=True, incremental_index=True)
result = r.execute().fetch()
expected = data.sort_values('a').reset_index(drop=True)
pd.testing.assert_frame_equal(result, expected)
def test_rename(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.rand(10, 4), columns=['A', 'B', 'C', 'D'])
df = md.DataFrame(raw, chunk_size=3)
with pytest.warns(Warning):
df.rename(str, errors='raise')
with pytest.raises(NotImplementedError):
df.rename({"A": "a", "B": "b"}, axis=1, copy=False)
r = df.rename(str)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename(str))
r = df.rename({"A": "a", "B": "b"}, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename({"A": "a", "B": "b"}, axis=1))
df.rename({"A": "a", "B": "b"}, axis=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.rename({"A": "a", "B": "b"}, axis=1))
raw = pd.DataFrame(rs.rand(10, 4),
columns=pd.MultiIndex.from_tuples((('A', 'C'), ('A', 'D'), ('B', 'E'), ('B', 'F'))))
df = md.DataFrame(raw, chunk_size=3)
r = df.rename({"C": "a", "D": "b"}, level=1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename({"C": "a", "D": "b"}, level=1, axis=1))
raw = pd.Series(rs.rand(10), name='series')
series = md.Series(raw, chunk_size=3)
r = series.rename('new_series')
pd.testing.assert_series_equal(r.execute().fetch(),
raw.rename('new_series'))
r = series.rename(lambda x: 2 ** x)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.rename(lambda x: 2 ** x))
with pytest.raises(TypeError):
series.name = {1: 10, 2: 20}
series.name = 'new_series'
pd.testing.assert_series_equal(series.execute().fetch(),
raw.rename('new_series'))
raw = pd.MultiIndex.from_frame(pd.DataFrame(rs.rand(10, 2), columns=['A', 'B']))
idx = md.Index(raw)
r = idx.rename(['C', 'D'])
pd.testing.assert_index_equal(r.execute().fetch(),
raw.rename(['C', 'D']))
r = idx.set_names('C', level=0)
pd.testing.assert_index_equal(r.execute().fetch(),
raw.set_names('C', level=0))
def test_rename_axis(setup):
rs = np.random.RandomState(0)
# test dataframe cases
raw = pd.DataFrame(rs.rand(10, 4), columns=['A', 'B', 'C', 'D'])
df = md.DataFrame(raw, chunk_size=3)
r = df.rename_axis('idx')
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename_axis('idx'))
r = df.rename_axis('cols', axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.rename_axis('cols', axis=1))
df.rename_axis('c', axis=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.rename_axis('c', axis=1))
df.columns.name = 'df_cols'
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.rename_axis('df_cols', axis=1))
# test dataframe cases with MultiIndex
raw = pd.DataFrame(
rs.rand(10, 4), columns= | pd.MultiIndex.from_tuples([('A', 1), ('B', 2), ('C', 3), ('D', 4)]) | pandas.MultiIndex.from_tuples |
# Required imports
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import pylab
import scipy
import random
import datetime
import re
import time
from math import sqrt
import matplotlib.dates as mdates
from matplotlib.dates import date2num, num2date
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import preprocessing
pd.set_option('display.max_columns', None) # to view all columns
from scipy.optimize import curve_fit
from supersmoother import SuperSmoother
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge, Lasso, RidgeCV, LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
import scipy.stats as stats
import warnings
warnings.filterwarnings("ignore")
from pyproj import Proj, Transformer
from ipyleaflet import (Map, basemaps, WidgetControl, GeoJSON,
LayersControl, Icon, Marker,FullScreenControl,
CircleMarker, Popup, AwesomeIcon)
from ipywidgets import HTML
plt.rcParams["font.family"] = "Times New Roman"
class functions:
def __init__(self, data):
self.setData(data)
self.__jointData = [None, 0]
# DATA VALIDATION
def __isValid_Data(self, data):
if(str(type(data)).lower().find('dataframe') == -1):
return (False, 'Make sure the data is a pandas DataFrame.\n')
if(not self.__hasColumns_Data(data)):
return (False, 'Make sure that ALL of the columns specified in the REQUIREMENTS are present.\n')
else:
return (True, None)
def __isValid_Construction_Data(self, data):
if(str(type(data)).lower().find('dataframe') == -1):
return (False, 'Make sure the data is a pandas DataFrame.\n')
if(not self.__hasColumns_Construction_Data(data)):
return (False, 'Make sure that ALL of the columns specified in the REQUIREMENTS are present.\n')
else:
return (True, None)
# COLUMN VALIDATION
def __hasColumns_Data(self, data):
find = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']
cols = list(data.columns)
cols = [x.upper() for x in cols]
hasCols = all(item in cols for item in find)
return hasCols
def __hasColumns_Construction_Data(self, data):
find = ['STATION_ID', 'AQUIFER', 'WELL_USE', 'LATITUDE', 'LONGITUDE', 'GROUND_ELEVATION', 'TOTAL_DEPTH']
cols = list(data.columns)
cols = [x.upper() for x in cols]
hasCols = all(item in cols for item in find)
return hasCols
# SETTING DATA
def setData(self, data, verbose=True):
validation = self.__isValid_Data(data)
if(validation[0]):
# Make all columns all caps
cols_upper = [x.upper() for x in list(data.columns)]
data.columns = cols_upper
self.data = data
if(verbose):
print('Successfully imported the data!\n')
self.__set_units()
else:
print('ERROR: {}'.format(validation[1]))
return self.REQUIREMENTS_DATA()
def setConstructionData(self, construction_data, verbose=True):
validation = self.__isValid_Construction_Data(construction_data)
if(validation[0]):
# Make all columns all caps
cols_upper = [x.upper() for x in list(construction_data.columns)]
construction_data.columns = cols_upper
self.construction_data = construction_data.set_index(['STATION_ID'])
if(verbose):
print('Successfully imported the construction data!\n')
else:
print('ERROR: {}'.format(validation[1]))
return self.REQUIREMENTS_CONSTRUCTION_DATA()
def jointData_is_set(self, lag):
if(str(type(self.__jointData[0])).lower().find('dataframe') == -1):
return False
else:
if(self.__jointData[1]==lag):
return True
else:
return False
def set_jointData(self, data, lag):
self.__jointData[0] = data
self.__jointData[1] = lag
# GETTING DATA
def getData(self):
return self.data
def get_Construction_Data(self):
return self.construction_data
# MESSAGES FOR INVALID DATA
def REQUIREMENTS_DATA(self):
print('PYLENM DATA REQUIREMENTS:\nThe imported data needs to meet ALL of the following conditions to have a successful import:')
print(' 1) Data should be a pandas dataframe.')
print(" 2) Data must have these column names: \n ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']")
def REQUIREMENTS_CONSTRUCTION_DATA(self):
print('PYLENM CONSTRUCTION REQUIREMENTS:\nThe imported construction data needs to meet ALL of the following conditions to have a successful import:')
print(' 1) Data should be a pandas dataframe.')
print(" 2) Data must have these column names: \n ['station_id', 'aquifer', 'well_use', 'latitude', 'longitude', 'ground_elevation', 'total_depth']")
# Helper function for plot_correlation
# Sorts analytes in a specific order: 'TRITIUM', 'URANIUM-238','IODINE-129','SPECIFIC CONDUCTANCE', 'PH', 'DEPTH_TO_WATER'
def __custom_analyte_sort(self, analytes):
my_order = 'TURISPDABCEFGHJKLMNOQVWXYZ-_abcdefghijklmnopqrstuvwxyz135790 2468'
return sorted(analytes, key=lambda word: [my_order.index(c) for c in word])
def __plotUpperHalf(self, *args, **kwargs):
corr_r = args[0].corr(args[1], 'pearson')
corr_text = f"{corr_r:2.2f}"
ax = plt.gca()
ax.set_axis_off()
marker_size = abs(corr_r) * 10000
ax.scatter([.5], [.5], marker_size, [corr_r], alpha=0.6, cmap="coolwarm",
vmin=-1, vmax=1, transform=ax.transAxes)
font_size = abs(corr_r) * 40 + 5
ax.annotate(corr_text, [.5, .48,], xycoords="axes fraction", # [.5, .48,]
ha='center', va='center', fontsize=font_size, fontweight='bold')
# Description:
# Removes all columns except 'COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME', 'RESULT', and 'RESULT_UNITS'.
# If the user specifies additional columns in addition to the ones listed above, those columns will be kept.
# The function returns a dataframe and has an optional parameter to be able to save the dataframe to a csv file.
# Parameters:
# data (dataframe): data to simplify
# inplace (bool): save data to current working dataset
# columns (list of strings): list of any additional columns on top of ['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME', 'RESULT', and 'RESULT_UNITS'] to be kept in the dataframe.
# save_csv (bool): flag to determine whether or not to save the dataframe to a csv file.
# file_name (string): name of the csv file you want to save
# save_dir (string): name of the directory you want to save the csv file to
def simplify_data(self, data=None, inplace=False, columns=None, save_csv=False, file_name= 'data_simplified', save_dir='data/'):
if(str(type(data)).lower().find('dataframe') == -1):
data = self.data
else:
data = data
if(columns==None):
sel_cols = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']
else:
hasColumns = all(item in list(data.columns) for item in columns)
if(hasColumns):
sel_cols = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS'] + columns
else:
print('ERROR: specified column(s) do not exist in the data')
return None
data = data[sel_cols]
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
data = data.sort_values(by="COLLECTION_DATE")
dup = data[data.duplicated(['COLLECTION_DATE', 'STATION_ID','ANALYTE_NAME', 'RESULT'])]
data = data.drop(dup.index)
data = data.reset_index().drop('index', axis=1)
if(save_csv):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
data.to_csv(save_dir + file_name + '.csv')
print('Successfully saved "' + file_name +'.csv" in ' + save_dir)
if(inplace):
self.setData(data, verbose=False)
return data
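# Illustrative call (hypothetical extra column name, not part of the required set):
#   fn = functions(raw_df)
#   simplified = fn.simplify_data(columns=['SAMPLE_DEPTH'], save_csv=False)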
# Description:
# Returns the Maximum Concentration Limit value for the specified analyte.
# Example: 'TRITIUM' returns 1.3
# Parameters:
# analyte_name (string): name of the analyte to be processed
def get_MCL(self, analyte_name):
mcl_dictionary = {'TRITIUM': 1.3, 'URANIUM-238': 1.31, 'NITRATE-NITRITE AS NITROGEN': 1,
'TECHNETIUM-99': 2.95, 'IODINE-129': 0, 'STRONTIUM-90': 0.9
}
return mcl_dictionary[analyte_name]
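# Example lookups, using the values hard-coded in mcl_dictionary above:
#   fn.get_MCL('TRITIUM')        # -> 1.3
#   fn.get_MCL('TECHNETIUM-99')  # -> 2.95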
def __set_units(self):
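# Builds an ANALYTE_NAME -> RESULT_UNITS lookup by keeping the first unit observed
# for each analyte; the mapping is stored in self.unit_dictionary for get_unit().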
analytes = list(np.unique(self.data[['ANALYTE_NAME']]))
mask1 = ~self.data[['ANALYTE_NAME','RESULT_UNITS']].duplicated()
res = self.data[['ANALYTE_NAME','RESULT_UNITS']][mask1]
mask2 = ~self.data[['ANALYTE_NAME']].duplicated()
res = res[mask2]
unit_dictionary = pd.Series(res.RESULT_UNITS.values,index=res.ANALYTE_NAME).to_dict()
self.unit_dictionary = unit_dictionary
# Description:
# Returns the unit of the analyte you specify.
# Example: 'DEPTH_TO_WATER' returns 'ft'
# Parameters:
# analyte_name (string): name of the analyte to be processed
def get_unit(self, analyte_name):
return self.unit_dictionary[analyte_name]
# Description:
# Filters construction data based on one column. You specify only ONE column to filter by, but can select MANY values for the entry.
# Parameters:
# data (dataframe): dataframe to filter
# col (string): column to filter. Example: col='STATION_ID'
# equals (list of strings): values to filter col by. Examples: equals=['FAI001A', 'FAI001B']
def filter_by_column(self, data=None, col=None, equals=[]):
if(data is None):
return 'ERROR: DataFrame was not provided to this function.'
else:
if(str(type(data)).lower().find('dataframe') == -1):
return 'ERROR: Data provided is not a pandas DataFrame.'
else:
data = data
# DATA VALIDATION
if(col==None):
return 'ERROR: Specify a column name to filter by.'
data_cols = list(data.columns)
if((col in data_cols)==False): # Make sure column name exists
return 'Error: Column name "{}" does not exist'.format(col)
if(equals==[]):
return 'ERROR: Specify a value that "{}" should equal to'.format(col)
data_val = list(data[col])
for value in equals:
if((value in data_val)==False):
return 'ERROR: No value equal to "{}" in "{}".'.format(value, col)
# QUERY
final_data = pd.DataFrame()
for value in equals:
current_data = data[data[col]==value]
final_data = pd.concat([final_data, current_data])
return final_data
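# Illustrative call (hypothetical filter value; 'WELL_USE' is one of the required
# construction-data columns):
#   fn.filter_by_column(data=fn.get_Construction_Data(), col='WELL_USE', equals=['MONITORING'])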
# Description:
# Returns a list of the well names filtered by the unit(s) specified.
# Parameters:
# units (list of strings): Letter of the well to be filtered (e.g. [‘A’] or [‘A’, ‘D’])
def filter_wells(self, units):
data = self.data
if(units==None):
units= ['A', 'B', 'C', 'D']
def getUnits():
wells = list(np.unique(data.STATION_ID))
wells = pd.DataFrame(wells, columns=['STATION_ID'])
for index, row in wells.iterrows():
mo = re.match('.+([0-9])[^0-9]*$', row.STATION_ID)
last_index = mo.start(1)
wells.at[index, 'unit'] = row.STATION_ID[last_index+1:]
u = wells.unit.iloc[index]
if(len(u)==0): # if has no letter, use D
wells.at[index, 'unit'] = 'D'
if(len(u)>1): # if has more than 1 letter, remove the extra letter
if(u.find('R')>0):
wells.at[index, 'unit'] = u[:-1]
else:
wells.at[index, 'unit'] = u[1:]
u = wells.unit.iloc[index]
if(u=='A' or u=='B' or u=='C' or u=='D'):
pass
else:
wells.at[index, 'unit'] = 'D'
return wells
df = getUnits()
res = df.loc[df.unit.isin(units)]
return list(res.STATION_ID)
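# Example: station IDs whose trailing unit letter is 'A' or 'D' (see getUnits above):
#   a_d_wells = fn.filter_wells(['A', 'D'])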
# Description:
# Removes outliers from a dataframe based on the z_scores and returns the new dataframe.
# Parameters:
# data (dataframe): data for the outliers to be removed from
# z_threshold (float): z_score threshold to eliminate.
def remove_outliers(self, data, z_threshold=4):
z = np.abs(stats.zscore(data))
row_loc = np.unique(np.where(z > z_threshold)[0])
data = data.drop(data.index[row_loc])
return data
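# Example (hypothetical all-numeric dataframe): rows with any |z-score| > 3 are dropped.
#   cleaned = fn.remove_outliers(numeric_df, z_threshold=3)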
# Description:
# Returns a csv file saved to save_dir with details pertaining to the specified analyte.
# Details include the well names, the date ranges and the number of unique samples.
# Parameters:
# analyte_name (string): name of the analyte to be processed
# save_dir (string): name of the directory you want to save the csv file to
def get_analyte_details(self, analyte_name, filter=False, col=None, equals=[], save_to_file = False, save_dir='analyte_details'):
data = self.data
data = data[data.ANALYTE_NAME == analyte_name].reset_index().drop('index', axis=1)
data = data[~data.RESULT.isna()]
data = data.drop(['ANALYTE_NAME', 'RESULT', 'RESULT_UNITS'], axis=1)
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
info = []
wells = np.unique(data.STATION_ID.values)
for well in wells:
current = data[data.STATION_ID == well]
startDate = current.COLLECTION_DATE.min().date()
endDate = current.COLLECTION_DATE.max().date()
numSamples = current.duplicated().value_counts()[0]
info.append({'Well Name': well, 'Start Date': startDate, 'End Date': endDate,
'Date Range (days)': endDate-startDate ,
'Unique samples': numSamples})
details = pd.DataFrame(info)
details.index = details['Well Name']
details = details.drop('Well Name', axis=1)
details = details.sort_values(by=['Start Date', 'End Date'])
details['Date Range (days)'] = (details['Date Range (days)']/ np.timedelta64(1, 'D')).astype(int)
if(save_to_file):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
details.to_csv(save_dir + '/' + analyte_name + '_details.csv')
return details
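# Illustrative call; the analyte must exist in the loaded data:
#   details = fn.get_analyte_details('TRITIUM', save_to_file=False)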
# Description:
# Returns a dataframe with a summary of the data for certain analytes.
# Summary includes the date ranges and the number of unique samples and other statistics for the analyte results.
# Parameters:
# analytes (list of strings): list of analyte names to be processed. If left empty, a list of all the analytes in the data will be used.
# sort_by (string): {‘date’, ‘samples’, ‘wells’} sorts the data by either the dates by entering: ‘date’, the samples by entering: ‘samples’, or by unique well locations by entering ‘wells’.
# ascending (bool): flag to sort in ascending order.
def get_data_summary(self, analytes=None, sort_by='date', ascending=False, filter=False, col=None, equals=[]):
data = self.data
if(analytes == None):
analytes = data.ANALYTE_NAME.unique()
data = data.loc[data.ANALYTE_NAME.isin(analytes)].drop(['RESULT_UNITS'], axis=1)
data = data[~data.duplicated()] # remove duplicates
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
data = data[~data.RESULT.isna()]
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
info = []
for analyte_name in analytes:
query = data[data.ANALYTE_NAME == analyte_name]
startDate = min(query.COLLECTION_DATE)
endDate = max(query.COLLECTION_DATE)
numSamples = query.shape[0]
wellCount = len(query.STATION_ID.unique())
stats = query.RESULT.describe().drop('count', axis=0)
stats = pd.DataFrame(stats).T
stats_col = [x for x in stats.columns]
result = {'Analyte Name': analyte_name, 'Start Date': startDate, 'End Date': endDate,
'Date Range (days)':endDate-startDate, '# unique wells': wellCount,'# samples': numSamples,
'Unit': self.get_unit(analyte_name) }
for num in range(len(stats_col)):
result[stats_col[num]] = stats.iloc[0][num]
info.append(result)
details = pd.DataFrame(info)
details.index = details['Analyte Name']
details = details.drop('Analyte Name', axis=1)
if(sort_by.lower() == 'date'):
details = details.sort_values(by=['Start Date', 'End Date', 'Date Range (days)'], ascending=ascending)
elif(sort_by.lower() == 'samples'):
details = details.sort_values(by=['# samples'], ascending=ascending)
elif(sort_by.lower() == 'wells'):
details = details.sort_values(by=['# unique wells'], ascending=ascending)
return details
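# Illustrative call, sorting analytes by their number of samples:
#   summary = fn.get_data_summary(analytes=['TRITIUM', 'URANIUM-238'], sort_by='samples')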
# Description:
# Displays the analyte names available at given well locations.
# Parameters:
# well_name (string): name of the well. If left empty, all wells are returned.
# filter (bool): flag to indicate filtering
# col (string): column to filter results
# equals (list of strings): value to match column name. Multiple values are accepted.
def get_well_analytes(self, well_name=None, filter=False, col=None, equals=[]):
data = self.data
bb = "\033[1m"
be = "\033[0m"
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
if(well_name==None):
wells = list(data.STATION_ID.unique())
else:
wells = [well_name]
for well in wells:
print("{}{}{}".format(bb,str(well), be))
analytes = sorted(list(data[data.STATION_ID==well].ANALYTE_NAME.unique()))
print(str(analytes) +'\n')
# Description:
# Filters data by passing the data and specifying the well_name and analyte_name
# Parameters:
# well_name (string): name of the well to be processed
# analyte_name (string): name of the analyte to be processed
def query_data(self, well_name, analyte_name):
data = self.data
query = data[data.STATION_ID == well_name]
query = query[query.ANALYTE_NAME == analyte_name]
if(query.shape[0] == 0):
return 0
else:
return query
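# Returns 0 when no rows match, otherwise the filtered dataframe, e.g.
# (well name reused from the filter_by_column example above):
#   q = fn.query_data('FAI001A', 'TRITIUM')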
# Description:
# Plot concentrations over time of a specified well and analyte with a smoothed curve on interpolated data points.
# Parameters:
# well_name (string): name of the well to be processed
# analyte_name (string): name of the analyte to be processed
# log_transform (bool): choose whether or not the data should be transformed to log base 10 values
# alpha (int): value between 0 and 10 for line smoothing
# year_interval (int): spacing of year ticks on the x-axis, e.g. (1 = every year, 5 = every 5 years, ...)
# plot_inline (bool): choose whether or not to show plot inline
# save_dir (string): name of the directory you want to save the plot to
def plot_data(self, well_name, analyte_name, log_transform=True, alpha=0,
plot_inline=True, year_interval=2, x_label='Years', y_label='', save_dir='plot_data', filter=False, col=None, equals=[]):
# Gets appropriate data (well_name and analyte_name)
query = self.query_data(well_name, analyte_name)
query = self.simplify_data(data=query)
if(type(query)==int and query == 0):
return 'No results found for {} and {}'.format(well_name, analyte_name)
else:
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(query.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
return 'ERROR: No results for this query with the specified filter parameters.'
query = query[query['STATION_ID'].isin(intersect_wells)]
x_data = query.COLLECTION_DATE
x_data = pd.to_datetime(x_data)
import os
from urllib.request import urlretrieve
import pandas as pd
FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename='fremont.csv', url=FREMONT_URL, force_download=False):
"""Download and cache the fremont data
Parameters
----------
filename: string (optional)
location to save the data
url: string (optional)
web location of the data
force_download: bool (optional)
if True, force redownload of data
Returns
-------
data: pandas.DataFrame
"""
if force_download or not os.path.exists(filename):
urlretrieve(url, filename)
df = pd.read_csv(filename, index_col='Date')
return df
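# Minimal usage sketch (not in the original notebook): the first call downloads the
# CSV, later calls reuse the cached copy.
if __name__ == '__main__':
    counts = get_fremont_data()
    print(counts.head())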
"""
Implementation of randomized hill climbing, simulated annealing, and genetic algorithm to
find optimal weights to a neural network that is classifying abalone as having either fewer
or more than 15 rings.
Based on AbaloneTest.java by <NAME>
"""
import os
import csv
import time
import sys
sys.path.append('/Users/jennyhung/MathfreakData/School/OMSCS_ML/Assign2/abagail_py/ABAGAIL/ABAGAIL.jar')
print(sys.path)
import jpype as jp
import get_all_data
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
#jp.startJVM(jp.getDefaultJVMPath(), "-ea")
jp.startJVM(jp.getDefaultJVMPath(), '-ea', '-Djava.class.path=/Users/jennyhung/MathfreakData/School/OMSCS_ML/Assign2/abagail_py/ABAGAIL/ABAGAIL.jar')
jp.java.lang.System.out.println("hello world")
jp.java.func.nn.backprop.BackPropagationNetworkFactory
jp.java.func.nn.backprop.RPROPUpdateRule
jp.java.func.nn.backprop.BatchBackPropagationTrainer
jp.java.shared.SumOfSquaresError
jp.java.shared.DataSet
jp.java.shared.Instance
jp.java.opt.SimulatedAnnealing
jp.java.opt.example.NeuralNetworkOptimizationProblem
jp.java.opt.RandomizedHillClimbing
jp.java.ga.StandardGeneticAlgorithm
jp.java.func.nn.activation.RELU
BackPropagationNetworkFactory = jp.JPackage('func').nn.backprop.BackPropagationNetworkFactory
DataSet = jp.JPackage('shared').DataSet
SumOfSquaresError = jp.JPackage('shared').SumOfSquaresError
NeuralNetworkOptimizationProblem = jp.JPackage('opt').example.NeuralNetworkOptimizationProblem
RandomizedHillClimbing = jp.JPackage('opt').RandomizedHillClimbing
SimulatedAnnealing = jp.JPackage('opt').SimulatedAnnealing
StandardGeneticAlgorithm = jp.JPackage('opt').ga.StandardGeneticAlgorithm
Instance = jp.JPackage('shared').Instance
RELU = jp.JPackage('func').nn.activation.RELU
INPUT_LAYER = 109
HIDDEN_LAYER = 5
OUTPUT_LAYER = 1
TRAINING_ITERATIONS = 1000
def get_all_data():
#dir = 'D:\\Backups\\StemData\\'
files = ['sample_orig_2016.txt', 'sample_orig_2015.txt', 'sample_orig_2014.txt', 'sample_orig_2013.txt',
'sample_orig_2012.txt', 'sample_orig_2011.txt',
'sample_orig_2010.txt', 'sample_orig_2009.txt', 'sample_orig_2008.txt', 'sample_orig_2007.txt']
files1 = ['sample_svcg_2016.txt', 'sample_svcg_2015.txt', 'sample_svcg_2014.txt', 'sample_svcg_2013.txt',
'sample_svcg_2012.txt', 'sample_svcg_2011.txt',
'sample_svcg_2010.txt', 'sample_svcg_2009.txt', 'sample_svcg_2008.txt', 'sample_svcg_2008.txt']
merged_data = pd.DataFrame()
for i in [0]:
print(files[i])
raw = pd.read_csv(files[i], sep='|', header=None, low_memory=False)
raw.columns = ['credit_score', 'first_pmt_date', 'first_time', 'mat_date', 'msa', 'mi_perc', 'units',
'occ_status', 'ocltv', 'odti', 'oupb', 'oltv', 'oint_rate', 'channel', 'ppm', 'fixed_rate',
'state', 'prop_type', 'zip', 'loan_num', 'loan_purpose', 'oterm', 'num_borrowers', 'seller_name',
'servicer_name', 'exceed_conform']
raw1 = pd.read_csv(files1[i], sep='|', header=None, low_memory=False)
raw1.columns = ['loan_num', 'yearmon', 'curr_upb', 'curr_delinq', 'loan_age', 'remain_months', 'repurchased',
'modified', 'zero_bal', 'zero_date', 'curr_rate', 'curr_def_upb', 'ddlpi', 'mi_rec',
'net_proceeds',
'non_mi_rec', 'exp', 'legal_costs', 'maint_exp', 'tax_insur', 'misc_exp', 'loss', 'mod_exp']
data = pd.merge(raw, raw1, on='loan_num', how='inner')
import numpy as np
import pandas as pd
from pathlib import Path
import fileinput
import shutil
import os
def get_discharge(gis_dir, start, end, coords, station_id=None):
'''Downloads climate and observation data from Daymet and USGS, respectively.
Args:
gis_dir (str): Path to the location of the NHDPlusV21 root directory; GageLoc and GageInfo
are required.
start (datetime): The starting date of the time period.
end (datetime): The end of the time period.
coords (float, float): A tuple including longitude and latitude of the observation point.
station_id (str): Optional USGS station ID; if None, the station nearest to coords is
looked up in GageLoc.
Note: either station_id or coords should be given.
Return:
climate (DataFrame): A Pandas dataframe including the following:
yday, dayl, prcp, srad, swe, tmax, tmin, vp, pet, qobs, tmean
'''
from shapely.geometry import Point
from shapely.ops import nearest_points
import geopandas as gpd
from metpy.units import units
if station_id is None:
# Get x, y coords from swmm if given and transform to lat, lon
lon, lat = coords
loc_path = Path(gis_dir, 'GageLoc.shp')
if not loc_path.exists():
raise FileNotFoundError('GageLoc.shp cannot be found in ' +
str(gis_dir))
else:
gloc = gpd.read_file(loc_path)
# Get station ID based on lat/lon
point = Point(lon, lat)
pts = gloc.geometry.unary_union
station = gloc[gloc.geometry.geom_equals(nearest_points(point, pts)[1])]
station_id = station.SOURCE_FEA.values[0]
start, end = pd.to_datetime(start), pd.to_datetime(end)
# spikein_utils.py
# Single Cell Sequencing Quality Assessment: scqua
#
# Copyright 2018 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import pandas as pd
from glob import iglob
import click
from sklearn.linear_model import LogisticRegression
import numpy as np
import scipy
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import matplotlib
def get_ERCC():
ercc = pd.read_table('https://raw.githubusercontent.com/Teichlab/readquant/master/readquant/ERCC.tsv', index_col=1)
ercc = np.log(ercc['concentration in Mix 1 (attomoles/ul)'])
return(ercc)
def get_SIRV():
sirv = pd.read_csv('https://raw.githubusercontent.com/chichaumiau/scRNA_metadata/master/SIRV_E2/SIRV_concentration.csv', index_col=1)
sirv = np.log(sirv['E2 molarity [fmoles/µl]']*1000)
return(sirv)
def get_detection_limit(spike, quant, det_threshold=0.1):
X = spike[:, None]
y = quant[spike.index] >= det_threshold
if y.sum() < 8:
return np.inf
lr = LogisticRegression(solver='liblinear', fit_intercept=True)
lr.fit(X, y)
midpoint = -lr.intercept_ / lr.coef_[0]
return np.exp(midpoint[0])
def get_accuracy(ercc, quant, det_threshold=0.1):
y = np.log(quant[ercc.index]) \
.replace([np.inf, -np.inf], np.nan) \
.dropna()
if (y >= np.log(det_threshold)).sum() < 8:
return -np.inf
correlation = y.corr(ercc, method='pearson')
return correlation
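# Illustrative use (hypothetical `tpm` table whose index contains ERCC spike-in IDs):
#   ercc = get_ERCC()
#   limit = get_detection_limit(ercc, tpm['cell_01'])  # midpoint concentration of the detection curve
#   acc = get_accuracy(ercc, tpm['cell_01'])           # Pearson r between log expression and log concentration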
def get_phn(cts_file,tpm_file,phn_file, ercc, sirv, spike):
cts = pd.read_csv(cts_file, index_col=0)
tpm = pd.read_csv(tpm_file, index_col=0)
phn = pd.read_csv(phn_file, index_col=0)
df = get_result(tpm, ercc, sirv, spike)
phn = pd.concat([phn,df,cts.loc[cts.index.str.startswith("ENS")].T], axis=1)
phn["Total_counts"] = cts.loc[cts.index.str.startswith("ENS")].sum()
return(phn)
def get_result(tpm, ercc=None, sirv=None, spike=None):
df = pd.DataFrame()
for col in tpm.columns:
quant = tpm[col]
qc_data = pd.Series()
from os import listdir
from os.path import isfile, join
import Orange
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from parameters import order, alphas, regression_measures, datasets, rank_dir, output_dir, graphics_dir, result_dir
from regression_algorithms import regression_list
results_dir = './../results/'
class Performance:
def __init__(self):
pass
def average_results(self, rfile, release):
'''
Calculates average results.
:param rfile: filename with results
:param release: release identifier used in the output filename
:return: average results written to another CSV file
'''
df = pd.read_csv(rfile)
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER',
'ALPHA', 'R2score', 'MAE', 'MSE', 'MAX'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'R2score'] = group['R2score'].mean()
dfr.at[i, 'MAE'] = group['MAE'].mean()
dfr.at[i, 'MSE'] = group['MSE'].mean()
dfr.at[i, 'MAX'] = group['MAX'].mean()
i = i + 1
print('Total lines in a file: ', i)
dfr.to_csv(results_dir + 'regression_average_results_' + str(release) + '.csv', index=False)
def run_rank_choose_parameters(self, filename, release):
df_best_dto = pd.read_csv(filename)
df_B1 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline1'].copy()
df_B2 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline2'].copy()
df_GEO = df_best_dto[df_best_dto['PREPROC'] == '_Geometric_SMOTE'].copy()
df_SMOTE = df_best_dto[df_best_dto['PREPROC'] == '_SMOTE'].copy()
df_SMOTEsvm = df_best_dto[df_best_dto['PREPROC'] == '_smoteSVM'].copy()
df_original = df_best_dto[df_best_dto['PREPROC'] == '_train'].copy()
for o in order:
for a in alphas:
GEOMETRY = '_dto_smoter_' + o + '_' + str(a)
df_dto = df_best_dto[df_best_dto['PREPROC'] == GEOMETRY].copy()
df = pd.concat([df_B1, df_B2, df_GEO, df_SMOTE, df_SMOTEsvm, df_original, df_dto])
self.rank_by_algorithm(df, o, str(a), release)
self.rank_dto_by(o + '_' + str(a), release)
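# Typical driver sequence (hypothetical raw-results filename; the second path matches
# the file written by average_results above):
#   perf = Performance()
#   perf.average_results(results_dir + 'regression_results_v1.csv', 'v1')
#   perf.run_rank_choose_parameters(results_dir + 'regression_average_results_v1.csv', 'v1')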
def rank_by_algorithm(self, df, order, alpha, release, smote=False):
'''
Calculates the rank of each oversampling method per dataset and algorithm.
:param df: dataframe with the average results
:param order: DTO geometry/order identifier
:param alpha: DTO alpha value
:param release: release identifier used in the output filenames
:param smote: whether the DTO variant combined with SMOTE is being ranked
:return:
'''
df_table = pd.DataFrame(
columns=['DATASET', 'ALGORITHM', 'ORIGINAL', 'RANK_ORIGINAL', 'SMOTE', 'RANK_SMOTE', 'SMOTE_SVM',
'RANK_SMOTE_SVM', 'BORDERLINE1', 'RANK_BORDERLINE1', 'BORDERLINE2', 'RANK_BORDERLINE2',
'GEOMETRIC_SMOTE', 'RANK_GEOMETRIC_SMOTE', 'DTO', 'RANK_DTO', 'GEOMETRY',
'ALPHA', 'unit'])
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
if smote == False:
df.to_csv(rank_dir + release + '_' + order + '_' + str(alpha) + '.csv', index=False)
else:
df.to_csv(rank_dir + release + '_smote_' + order + '_' + str(alpha) + '.csv', index=False)
j = 0
measures = regression_measures
for d in datasets:
for m in measures:
aux = group[group['DATASET'] == d]
aux = aux.reset_index()
df_table.at[j, 'DATASET'] = d
df_table.at[j, 'ALGORITHM'] = name
indice = aux.PREPROC[aux.PREPROC == '_train'].index.tolist()[0]
df_table.at[j, 'ORIGINAL'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_SMOTE'].index.tolist()[0]
df_table.at[j, 'SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_smoteSVM'].index.tolist()[0]
df_table.at[j, 'SMOTE_SVM'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline1'].index.tolist()[0]
df_table.at[j, 'BORDERLINE1'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline2'].index.tolist()[0]
df_table.at[j, 'BORDERLINE2'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Geometric_SMOTE'].index.tolist()[0]
df_table.at[j, 'GEOMETRIC_SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.ORDER == order].index.tolist()[0]
df_table.at[j, 'DTO'] = aux.at[indice, m]
df_table.at[j, 'GEOMETRY'] = order
df_table.at[j, 'ALPHA'] = alpha
df_table.at[j, 'unit'] = m
j += 1
df_r2 = df_table[df_table['unit'] == 'R2score']
df_mae = df_table[df_table['unit'] == 'MAE']
df_mse = df_table[df_table['unit'] == 'MSE']
df_max = df_table[df_table['unit'] == 'MAX']
r2 = df_r2[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
mae = df_mae[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
mse = df_mse[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
max = df_max[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
r2 = r2.reset_index()
r2.drop('index', axis=1, inplace=True)
mae = mae.reset_index()
mae.drop('index', axis=1, inplace=True)
mse = mse.reset_index()
mse.drop('index', axis=1, inplace=True)
max = max.reset_index()
max.drop('index', axis=1, inplace=True)
# calcula rank linha a linha
r2_rank = r2.rank(axis=1, ascending=False)
mae_rank = mae.rank(axis=1, ascending=True)
mse_rank = mse.rank(axis=1, ascending=True)
max_rank = max.rank(axis=1, ascending=True)
df_r2 = df_r2.reset_index()
df_r2.drop('index', axis=1, inplace=True)
df_r2['RANK_ORIGINAL'] = r2_rank['ORIGINAL']
df_r2['RANK_SMOTE'] = r2_rank['SMOTE']
df_r2['RANK_SMOTE_SVM'] = r2_rank['SMOTE_SVM']
df_r2['RANK_BORDERLINE1'] = r2_rank['BORDERLINE1']
df_r2['RANK_BORDERLINE2'] = r2_rank['BORDERLINE2']
df_r2['RANK_GEOMETRIC_SMOTE'] = r2_rank['GEOMETRIC_SMOTE']
df_r2['RANK_DTO'] = r2_rank['DTO']
df_mae = df_mae.reset_index()
df_mae.drop('index', axis=1, inplace=True)
df_mae['RANK_ORIGINAL'] = mae_rank['ORIGINAL']
df_mae['RANK_SMOTE'] = mae_rank['SMOTE']
df_mae['RANK_SMOTE_SVM'] = mae_rank['SMOTE_SVM']
df_mae['RANK_BORDERLINE1'] = mae_rank['BORDERLINE1']
df_mae['RANK_BORDERLINE2'] = mae_rank['BORDERLINE2']
df_mae['RANK_GEOMETRIC_SMOTE'] = mae_rank['GEOMETRIC_SMOTE']
df_mae['RANK_DTO'] = mae_rank['DTO']
df_mse = df_mse.reset_index()
df_mse.drop('index', axis=1, inplace=True)
df_mse['RANK_ORIGINAL'] = mse_rank['ORIGINAL']
df_mse['RANK_SMOTE'] = mse_rank['SMOTE']
df_mse['RANK_SMOTE_SVM'] = mse_rank['SMOTE_SVM']
df_mse['RANK_BORDERLINE1'] = mse_rank['BORDERLINE1']
df_mse['RANK_BORDERLINE2'] = mse_rank['BORDERLINE2']
df_mse['RANK_GEOMETRIC_SMOTE'] = mse_rank['GEOMETRIC_SMOTE']
df_mse['RANK_DTO'] = mse_rank['DTO']
df_max = df_max.reset_index()
df_max.drop('index', axis=1, inplace=True)
df_max['RANK_ORIGINAL'] = max_rank['ORIGINAL']
df_max['RANK_SMOTE'] = max_rank['SMOTE']
df_max['RANK_SMOTE_SVM'] = max_rank['SMOTE_SVM']
df_max['RANK_BORDERLINE1'] = max_rank['BORDERLINE1']
df_max['RANK_BORDERLINE2'] = max_rank['BORDERLINE2']
df_max['RANK_GEOMETRIC_SMOTE'] = max_rank['GEOMETRIC_SMOTE']
df_max['RANK_DTO'] = max_rank['DTO']
# average rank
media_r2_rank = r2_rank.mean(axis=0)
media_mae_rank = mae_rank.mean(axis=0)
media_mse_rank = mse_rank.mean(axis=0)
media_max_rank = max_rank.mean(axis=0)
media_r2_rank_file = media_r2_rank.reset_index()
media_r2_rank_file = media_r2_rank_file.sort_values(by=0)
media_mae_rank_file = media_mae_rank.reset_index()
media_mae_rank_file = media_mae_rank_file.sort_values(by=0)
media_mse_rank_file = media_mse_rank.reset_index()
media_mse_rank_file = media_mse_rank_file.sort_values(by=0)
media_max_rank_file = media_max_rank.reset_index()
media_max_rank_file = media_max_rank_file.sort_values(by=0)
if smote == False:
# Grava arquivos importantes
df_r2.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv', index=False)
df_mae.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv', index=False)
df_mse.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv', index=False)
df_max.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_max.csv', index=False)
media_r2_rank_file.to_csv(
rank_dir + release + '_' + 'media_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv',
index=False)
media_mae_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv',
index=False)
media_mse_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv',
index=False)
media_max_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_max.csv',
index=False)
GEOMETRY = order + '_' + str(alpha)
# grafico CD
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
'DTO']
avranks = list(media_r2_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_r2.pdf')
plt.close()
avranks = list(media_mae_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_mae.pdf')
plt.close()
avranks = list(media_mse_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_mse.pdf')
plt.close()
avranks = list(media_max_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_max.pdf')
plt.close()
print('Delaunay Type= ', GEOMETRY)
print('Algorithm= ', name)
else:
# Grava arquivos importantes
df_r2.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv', index=False)
df_mae.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv', index=False)
df_mse.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv', index=False)
df_max.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_max.csv', index=False)
media_r2_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv',
index=False)
media_mae_rank_file.to_csv(
rank_dir + release + '_smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv',
index=False)
media_mse_rank_file.to_csv(
rank_dir + release + 'smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv',
index=False)
media_max_rank_file.to_csv(
rank_dir + release + 'smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_max.csv',
index=False)
GEOMETRY = order + '_' + str(alpha)
# grafico CD
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
GEOMETRY]
avranks = list(media_r2_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_pre.pdf')
plt.close()
avranks = list(media_mae_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_rec.pdf')
plt.close()
avranks = list(media_mse_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_spe.pdf')
plt.close()
avranks = list(media_max_rank)
cd = Orange.evaluation.compute_CD(avranks, len(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_f1.pdf')
plt.close()
print('SMOTE Delaunay Type= ', GEOMETRY)
print('SMOTE Algorithm= ', name)
def rank_dto_by(self, geometry, release, smote=False):
M = ['_r2.csv', '_mae.csv', '_mse.csv', '_max.csv']
df_media_rank = pd.DataFrame(columns=['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE',
'RANK_SMOTE_SVM', 'RANK_BORDERLINE1', 'RANK_BORDERLINE2',
'RANK_GEOMETRIC_SMOTE', 'RANK_DTO', 'unit'])
if smote == False:
name = rank_dir + release + '_total_rank_' + geometry + '_'
else:
name = rank_dir + release + '_smote_total_rank_' + geometry + '_'
for m in M:
i = 0
for c in regression_list:
df = pd.read_csv(name + c + m)
rank_original = df.RANK_ORIGINAL.mean()
rank_smote = df.RANK_SMOTE.mean()
rank_smote_svm = df.RANK_SMOTE_SVM.mean()
rank_b1 = df.RANK_BORDERLINE1.mean()
rank_b2 = df.RANK_BORDERLINE2.mean()
rank_geo_smote = df.RANK_GEOMETRIC_SMOTE.mean()
rank_dto = df.RANK_DTO.mean()
df_media_rank.loc[i, 'ALGORITHM'] = df.loc[0, 'ALGORITHM']
df_media_rank.loc[i, 'RANK_ORIGINAL'] = rank_original
df_media_rank.loc[i, 'RANK_SMOTE'] = rank_smote
df_media_rank.loc[i, 'RANK_SMOTE_SVM'] = rank_smote_svm
df_media_rank.loc[i, 'RANK_BORDERLINE1'] = rank_b1
df_media_rank.loc[i, 'RANK_BORDERLINE2'] = rank_b2
df_media_rank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = rank_geo_smote
df_media_rank.loc[i, 'RANK_DTO'] = rank_dto
df_media_rank.loc[i, 'unit'] = df.loc[0, 'unit']
i += 1
dfmediarank = df_media_rank.copy()
dfmediarank = dfmediarank.sort_values('RANK_DTO')
dfmediarank.loc[i, 'ALGORITHM'] = 'avarage'
dfmediarank.loc[i, 'RANK_ORIGINAL'] = df_media_rank['RANK_ORIGINAL'].mean()
dfmediarank.loc[i, 'RANK_SMOTE'] = df_media_rank['RANK_SMOTE'].mean()
dfmediarank.loc[i, 'RANK_SMOTE_SVM'] = df_media_rank['RANK_SMOTE_SVM'].mean()
dfmediarank.loc[i, 'RANK_BORDERLINE1'] = df_media_rank['RANK_BORDERLINE1'].mean()
dfmediarank.loc[i, 'RANK_BORDERLINE2'] = df_media_rank['RANK_BORDERLINE2'].mean()
dfmediarank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = df_media_rank['RANK_GEOMETRIC_SMOTE'].mean()
dfmediarank.loc[i, 'RANK_DTO'] = df_media_rank['RANK_DTO'].mean()
dfmediarank.loc[i, 'unit'] = df.loc[0, 'unit']
i += 1
dfmediarank.loc[i, 'ALGORITHM'] = 'std'
dfmediarank.loc[i, 'RANK_ORIGINAL'] = df_media_rank['RANK_ORIGINAL'].std()
dfmediarank.loc[i, 'RANK_SMOTE'] = df_media_rank['RANK_SMOTE'].std()
dfmediarank.loc[i, 'RANK_SMOTE_SVM'] = df_media_rank['RANK_SMOTE_SVM'].std()
dfmediarank.loc[i, 'RANK_BORDERLINE1'] = df_media_rank['RANK_BORDERLINE1'].std()
dfmediarank.loc[i, 'RANK_BORDERLINE2'] = df_media_rank['RANK_BORDERLINE2'].std()
dfmediarank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = df_media_rank['RANK_GEOMETRIC_SMOTE'].std()
dfmediarank.loc[i, 'RANK_DTO'] = df_media_rank['RANK_DTO'].std()
dfmediarank.loc[i, 'unit'] = df.loc[0, 'unit']
dfmediarank['RANK_ORIGINAL'] = pd.to_numeric(dfmediarank['RANK_ORIGINAL'], downcast="float").round(2)
dfmediarank['RANK_SMOTE'] = pd.to_numeric(dfmediarank['RANK_SMOTE'], downcast="float").round(2)
dfmediarank['RANK_SMOTE_SVM'] = pd.to_numeric(dfmediarank['RANK_SMOTE_SVM'], downcast="float").round(2)
dfmediarank['RANK_BORDERLINE1'] = pd.to_numeric(dfmediarank['RANK_BORDERLINE1'], downcast="float").round(2)
dfmediarank['RANK_BORDERLINE2'] = pd.to_numeric(dfmediarank['RANK_BORDERLINE2'], downcast="float").round(2)
dfmediarank['RANK_GEOMETRIC_SMOTE'] = pd.to_numeric(dfmediarank['RANK_GEOMETRIC_SMOTE'],
downcast="float").round(2)
dfmediarank['RANK_DTO'] = pd.to_numeric(dfmediarank['RANK_DTO'], downcast="float").round(2)
if smote == False:
dfmediarank.to_csv(output_dir + release + '_results_media_rank_' + geometry + m,
index=False)
else:
dfmediarank.to_csv(output_dir + release + '_smote_results_media_rank_' + geometry + m,
index=False)
def grafico_variacao_alpha(self, release):
M = ['_r2', '_mae', '_mse', '_max']
df_alpha_variations_rank = pd.DataFrame()
df_alpha_variations_rank['alphas'] = alphas
df_alpha_variations_rank.index = alphas
df_alpha_all = pd.DataFrame()
df_alpha_all['alphas'] = alphas
df_alpha_all.index = alphas
for m in M:
for o in order:
for a in alphas:
filename = output_dir + release + '_results_media_rank_' + o + '_' + str(
a) + m + '.csv'
print(filename)
df = pd.read_csv(filename)
mean = df.loc[8, 'RANK_DTO']
df_alpha_variations_rank.loc[a, 'AVARAGE_RANK'] = mean
if m == '_r2':
measure = 'R2'
if m == '_mae':
measure = 'MAE'
if m == '_mse':
measure = 'MSE'
if m == '_max':
measure = 'MAX'
df_alpha_all[o + '_' + measure] = df_alpha_variations_rank['AVARAGE_RANK'].copy()
fig, ax = plt.subplots()
ax.set_title('DTO AVARAGE RANK\n ' + 'GEOMETRY = ' + o + '\nMEASURE = ' + measure, fontsize=10)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
ax.plot(df_alpha_variations_rank['AVARAGE_RANK'], marker='d', label='Avarage Rank')
ax.legend(loc="upper right")
plt.xticks(range(11))
fig.savefig(graphics_dir + release + '_pic_' + o + '_' + measure + '.png', dpi=125)
plt.show()
plt.close()
# figure(num=None, figsize=(10, 10), dpi=800, facecolor='w', edgecolor='k')
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = R2', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
ft1 = df_alpha_all['max_solid_angle_R2']
ft2 = df_alpha_all['min_solid_angle_R2']
ft3 = df_alpha_all['solid_angle_R2']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_all_r2.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(graphics_dir + release + '_pic_all_r2.csv', index=False)
###################
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = MAE', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
ft1 = df_alpha_all['max_solid_angle_MAE']
ft2 = df_alpha_all['min_solid_angle_MAE']
ft3 = df_alpha_all['solid_angle_MAE']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_all_mae.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(graphics_dir + release + '_pic_all_mae.csv', index=False)
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = MSE', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
ft1 = df_alpha_all['max_solid_angle_MSE']
ft2 = df_alpha_all['min_solid_angle_MSE']
ft3 = df_alpha_all['solid_angle_MSE']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_all_mse.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(graphics_dir + release + '_pic_all_mse.csv', index=False)
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = MAX', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_alpha_all['alphas']
t2 = df_alpha_all['alphas']
t3 = df_alpha_all['alphas']
ft1 = df_alpha_all['max_solid_angle_MAX']
ft2 = df_alpha_all['min_solid_angle_MAX']
ft3 = df_alpha_all['solid_angle_MAX']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_all_max.png', dpi=800)
plt.show()
plt.close()
df_alpha_all.to_csv(graphics_dir + release + '_pic_all_max.csv', index=False)
def best_alpha(self, kind):
# Best alpha calculation
# GEO
df1 = pd.read_csv(output_dir + 'v1' + '_pic_all_geo.csv')
df2 = pd.read_csv(output_dir + 'v2' + '_pic_all_geo.csv')
df3 = pd.read_csv(output_dir + 'v3' + '_pic_all_geo.csv')
if kind == 'biclass':
col = ['area_GEO', 'volume_GEO', 'area_volume_ratio_GEO',
'edge_ratio_GEO', 'radius_ratio_GEO', 'aspect_ratio_GEO',
'max_solid_angle_GEO', 'min_solid_angle_GEO', 'solid_angle_GEO',
'area_IBA', 'volume_IBA', 'area_volume_ratio_IBA', 'edge_ratio_IBA',
'radius_ratio_IBA', 'aspect_ratio_IBA', 'max_solid_angle_IBA',
'min_solid_angle_IBA', 'solid_angle_IBA', 'area_AUC', 'volume_AUC',
'area_volume_ratio_AUC', 'edge_ratio_AUC', 'radius_ratio_AUC',
'aspect_ratio_AUC', 'max_solid_angle_AUC', 'min_solid_angle_AUC',
'solid_angle_AUC']
else:
col = ['area_GEO', 'volume_GEO',
'area_volume_ratio_GEO', 'edge_ratio_GEO', 'radius_ratio_GEO',
'aspect_ratio_GEO', 'max_solid_angle_GEO', 'min_solid_angle_GEO',
'solid_angle_GEO', 'area_IBA', 'volume_IBA', 'area_volume_ratio_IBA',
'edge_ratio_IBA', 'radius_ratio_IBA', 'aspect_ratio_IBA',
'max_solid_angle_IBA', 'min_solid_angle_IBA', 'solid_angle_IBA']
df_mean = pd.DataFrame()
df_mean['alphas'] = df1.alphas
for c in col:
for i in np.arange(0, df1.shape[0]):
df_mean.loc[i, c] = (df1.loc[i, c] + df2.loc[i, c] + df3.loc[i, c]) / 3.0
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = GEO', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_GEO']
ft2 = df_mean['volume_GEO']
ft3 = df_mean['area_volume_ratio_GEO']
ft4 = df_mean['edge_ratio_GEO']
ft5 = df_mean['radius_ratio_GEO']
ft6 = df_mean['aspect_ratio_GEO']
ft7 = df_mean['max_solid_angle_GEO']
ft8 = df_mean['min_solid_angle_GEO']
ft9 = df_mean['solid_angle_GEO']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_geo.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_geo.csv', index=False)
###################
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = IBA', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_IBA']
ft2 = df_mean['volume_IBA']
ft3 = df_mean['area_volume_ratio_IBA']
ft4 = df_mean['edge_ratio_IBA']
ft5 = df_mean['radius_ratio_IBA']
ft6 = df_mean['aspect_ratio_IBA']
ft7 = df_mean['max_solid_angle_IBA']
ft8 = df_mean['min_solid_angle_IBA']
ft9 = df_mean['solid_angle_IBA']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_iba.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_iba.csv', index=False)
if kind == 'biclass':
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = AUC', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = df_mean['alphas']
t2 = df_mean['alphas']
t3 = df_mean['alphas']
t4 = df_mean['alphas']
t5 = df_mean['alphas']
t6 = df_mean['alphas']
t7 = df_mean['alphas']
t8 = df_mean['alphas']
t9 = df_mean['alphas']
ft1 = df_mean['area_AUC']
ft2 = df_mean['volume_AUC']
ft3 = df_mean['area_volume_ratio_AUC']
ft4 = df_mean['edge_ratio_AUC']
ft5 = df_mean['radius_ratio_AUC']
ft6 = df_mean['aspect_ratio_AUC']
ft7 = df_mean['max_solid_angle_AUC']
ft8 = df_mean['min_solid_angle_AUC']
ft9 = df_mean['solid_angle_AUC']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_auc.png', dpi=800)
plt.show()
plt.close()
df_mean.to_csv(output_dir + kind + '_pic_average_auc.csv', index=False)
def run_global_rank(self, filename, kind, release):
df_best_dto = pd.read_csv(filename)
df_B1 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline1'].copy()
df_B2 = df_best_dto[df_best_dto['PREPROC'] == '_Borderline2'].copy()
df_GEO = df_best_dto[df_best_dto['PREPROC'] == '_Geometric_SMOTE'].copy()
df_SMOTE = df_best_dto[df_best_dto['PREPROC'] == '_SMOTE'].copy()
df_SMOTEsvm = df_best_dto[df_best_dto['PREPROC'] == '_smoteSVM'].copy()
df_original = df_best_dto[df_best_dto['PREPROC'] == '_train'].copy()
o = 'solid_angle'
if kind == 'biclass':
a = 7.0
else:
a = 7.5
GEOMETRY = '_delaunay_' + o + '_' + str(a)
df_dto = df_best_dto[df_best_dto['PREPROC'] == GEOMETRY].copy()
df = pd.concat([df_B1, df_B2, df_GEO, df_SMOTE, df_SMOTEsvm, df_original, df_dto])
self.rank_by_algorithm(df, kind, o, str(a), release, smote=True)
self.rank_dto_by(o + '_' + str(a), kind, release, smote=True)
def overall_rank(self, ext, kind, alpha):
df1 = pd.read_csv(
output_dir + 'v1_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
df2 = pd.read_csv(
output_dir + 'v2_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
df3 = pd.read_csv(
output_dir + 'v3_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
col = ['RANK_ORIGINAL', 'RANK_SMOTE', 'RANK_SMOTE_SVM', 'RANK_BORDERLINE1'
, 'RANK_BORDERLINE2', 'RANK_GEOMETRIC_SMOTE', 'RANK_DELAUNAY']
df_mean = pd.DataFrame()
df_mean['ALGORITHM'] = df1.ALGORITHM
df_mean['unit'] = df1.unit
for c in col:
for i in np.arange(0, df1.shape[0]):
df_mean.loc[i, c] = (df1.loc[i, c] + df2.loc[i, c] + df3.loc[i, c]) / 3.0
df_mean['RANK_ORIGINAL'] = pd.to_numeric(df_mean['RANK_ORIGINAL'], downcast="float").round(2)
df_mean['RANK_SMOTE'] = pd.to_numeric(df_mean['RANK_SMOTE'], downcast="float").round(2)
df_mean['RANK_SMOTE_SVM'] = pd.to_numeric(df_mean['RANK_SMOTE_SVM'], downcast="float").round(2)
df_mean['RANK_BORDERLINE1'] = pd.to_numeric(df_mean['RANK_BORDERLINE1'], downcast="float").round(2)
df_mean['RANK_BORDERLINE2'] = pd.to_numeric(df_mean['RANK_BORDERLINE2'], downcast="float").round(2)
df_mean['RANK_GEOMETRIC_SMOTE'] = pd.to_numeric(df_mean['RANK_GEOMETRIC_SMOTE'], downcast="float").round(2)
df_mean['RANK_DELAUNAY'] = pd.to_numeric(df_mean['RANK_DELAUNAY'], downcast="float").round(2)
df_mean.to_csv(output_dir + 'overall_rank_results_' + kind + '_' + str(alpha) + '_' + ext + '.csv', index=False)
def cd_graphics(self, df, datasetlen, kind): # TODO
# grafico CD
names = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']
algorithms = regression_list
for i in np.arange(0, len(algorithms)):
avranks = list(df.loc[i])
algorithm = avranks[0]
measure = avranks[1]
avranks = avranks[2:]
cd = Orange.evaluation.compute_CD(avranks, datasetlen)
Orange.evaluation.graph_ranks(avranks, names, cd=cd, width=len(algorithms), textspace=3)
plt.savefig(output_dir + kind + '_cd_' + algorithm + '_' + measure + '.pdf')
plt.close()
def read_dir_files(self, dir_name):
f = [f for f in listdir(dir_name) if isfile(join(dir_name, f))]
return f
def find_best_rank(self, results_dir,release):
results = self.read_dir_files(results_dir)
df = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
import os
from io import StringIO
from pathlib import Path
import pandas as pd
import pandas._testing as pt
import pytest
from pyplotutil.datautil import Data, DataSet
csv_dir_path = os.path.join(os.path.dirname(__file__), "data")
test_data = """\
a,b,c,d,e
1,0.01,10.0,3.5,100
2,0.02,20.0,7.5,200
3,0.03,30.0,9.5,300
4,0.04,40.0,11.5,400
"""
test_dataset = """\
tag,a,b,c,d,e
tag01,0,1,2,3,4
tag01,5,6,7,8,9
tag01,10,11,12,13,14
tag01,15,16,17,18,19
tag01,20,21,22,23,24
tag01,25,26,27,28,29
tag02,10,11,12,13,14
tag02,15,16,17,18,19
tag02,110,111,112,113,114
tag02,115,116,117,118,119
tag02,120,121,122,123,124
tag02,125,126,127,128,129
tag03,20,21,22,23,24
tag03,25,26,27,28,29
tag03,210,211,212,213,214
tag03,215,216,217,218,219
tag03,220,221,222,223,224
tag03,225,226,227,228,229
"""
@pytest.mark.parametrize("cls", [str, Path])
def test_data_init_path(cls) -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
path = cls(csv_path)
expected_df = pd.read_csv(csv_path)
data = Data(path)
assert data.datapath == Path(csv_path)
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_init_StringIO() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path)
data = Data(StringIO(test_data))
assert data.datapath is None
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_init_DataFrame() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path)
if isinstance(expected_df, pd.DataFrame):
data = Data(expected_df)
assert data.datapath is None
pt.assert_frame_equal(data.dataframe, expected_df)
else:
pytest.skip(f"Expected DataFram type: {type(expected_df)}")
def test_data_init_kwds() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path, usecols=[0, 1])
data = Data(csv_path, usecols=[0, 1])
assert len(data.dataframe.columns) == 2
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_getitem() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_series_equal(data["a"], df.a) # type: ignore
pt.assert_series_equal(data["b"], df.b) # type: ignore
pt.assert_series_equal(data["c"], df.c) # type: ignore
def test_data_getitem_no_header() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
data = Data(df)
pt.assert_series_equal(data[0], df[0]) # type: ignore
pt.assert_series_equal(data[1], df[1]) # type: ignore
pt.assert_series_equal(data[2], df[2]) # type: ignore
def test_data_len() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
assert len(data) == len(df)
def test_data_getattr() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_index_equal(data.columns, pd.Index(["a", "b", "c"]))
assert data.shape == (3, 3)
assert data.to_csv() == ",a,b,c\n0,0,1,2\n1,3,4,5\n2,6,7,8\n"
assert data.iat[1, 2] == 5
assert data.at[2, "a"] == 6
def test_data_attributes() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_series_equal(data.a, df.a) # type: ignore
pt.assert_series_equal(data.b, df.b) # type: ignore
pt.assert_series_equal(data.c, df.c) # type: ignore
def test_data_param() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
data = Data(csv_path)
assert data.param("b") == 0.01
def test_data_param_list() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
data = Data(csv_path)
assert data.param(["c", "e"]) == [10.0, 100]
@pytest.mark.parametrize("cls", [str, Path])
def test_dataset_init_path(cls) -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
path = cls(csv_path)
raw_df = pd.read_csv(csv_path)
dataset = DataSet(path)
assert dataset.datapath == Path(csv_path)
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("tag")
datadict = dataset._datadict
pt.assert_frame_equal(
datadict["tag01"].dataframe,
groups.get_group("tag01").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag02"].dataframe,
groups.get_group("tag02").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag03"].dataframe,
groups.get_group("tag03").reset_index(drop=True),
)
else:
        pytest.skip(f"Expected DataFrame type: {type(raw_df)}")
def test_dataset_init_StringIO() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
raw_df = | pd.read_csv(csv_path) | pandas.read_csv |
import re
import logging
from functools import reduce, partial
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor, as_completed
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from influxdb.resultset import ResultSet
from requests.exceptions import RequestException
from .connection import get_client, InfluxDBException, _timeout
from .util import aslist, asstr
from .db import _check_table, _CATEGORICAL_COLUMNS, AGGREGATE
from . import db
__all__ = ['query', 'query_async', 'getdf']
log = logging.getLogger(__name__)
def query(query: str, **kwargs) -> ResultSet:
"""
Fetch results of a raw SQL query.
Parameters
----------
query : str
An SQL query to fetch results for.
kwargs :
Passed to ``influxdb.client.InfluxDBClient``.
Returns
-------
influxdb.resultset.ResultSet
"""
try:
client = get_client()
except InfluxDBException:
log.exception('Failed to instantiate InfluxDB client:')
raise
kwargs.setdefault('epoch', 'ms')
try:
log.debug('Executing query: %s', query)
result = client.query(query, **kwargs)
log.debug('Result set size: %d, %d rows', len(result), len(tuple(result.get_points())))
return result
except RequestException:
log.error('Failed to execute query in %d seconds: %s', _timeout, query)
raise
except InfluxDBException:
log.error('Failed to execute query: %s', query)
raise
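# Example usage (sketch): the measurement name "ping_1s" is illustrative, and an
# InfluxDB connection reachable through get_client() is assumed.
#
#     rs = query("SELECT * FROM ping_1s LIMIT 10")
#     for point in rs.get_points():
#         print(point)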
def query_async(queries: list, callback=None, **kwargs) -> ResultSet:
"""
Generator fetching results of SQL queries in an asynchronous manner.
Parameters
----------
queries : list of str
        A list of SQL queries to fetch results for.
callback : callable
The function to call after each successfully executed query.
kwargs :
Passed to ``influxdb.client.InfluxDBClient``.
Yields
------
influxdb.resultset.ResultSet
"""
if isinstance(queries, str):
queries = [queries]
with ThreadPoolExecutor(max_workers=len(queries)) as executor:
try:
for future in as_completed((executor.submit(query, query_str, **kwargs)
for query_str in queries),
# +1 to allow InfluxDBClient (requests) to fail first
timeout=_timeout + 1):
yield future.result()
if callback:
callback()
except (futures.TimeoutError, RequestException):
log.error("Failed to execute all queries in %d seconds: %s", _timeout, queries)
raise
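# Example usage (sketch; the query strings are illustrative):
#
#     queries = ["SELECT * FROM ping_1s LIMIT 5",
#                "SELECT * FROM modem_1s LIMIT 5"]
#     for rs in query_async(queries, callback=lambda: print("one query done")):
#         print(len(tuple(rs.get_points())))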
def _query_str(table, *, freq, columns='', where='', resample='', limit=1000):
parts = ['SELECT {columns} FROM {table}_{freq}'.format(
columns=asstr(columns) or (table._select_agg() if resample else '*'),
table=str(table),
freq=freq)]
if where:
where = aslist(where, str)
parts.append('WHERE ' + ' AND '.join(where))
if resample:
resample = 'time({}), '.format(resample)
parts.append('GROUP BY ' + (resample + table._groupby()).lstrip(','))
if limit:
parts.append('LIMIT ' + str(int(limit)))
query_str = ' '.join(parts)
return query_str
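# For reference, the generated statement has roughly this shape (sketch; the table,
# tag and condition values are illustrative):
#
#     SELECT <columns or aggregates> FROM ping_1s
#     WHERE NodeId = '45' AND time >= '2017-05-01T00:00:00'
#     GROUP BY time(1h), <tag columns> LIMIT 1000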
def merge_asof_helper (left, right, tolerance=None):
    # https://github.com/pandas-dev/pandas/issues/16454: pandas doesn't allow multiple pd.Categorical "by" values?, dirty hacks
if 'time' in left.columns.values.tolist():
left.time = pd.to_datetime(left.time, unit='ms')
left.set_index('time', inplace=True)
left.sort_index(inplace=True)
right.time = pd.to_datetime(right.time, unit='ms')
right.set_index('time', inplace=True)
right.sort_index(inplace=True)
temp = pd.merge_asof(left, right, left_index=True, right_index=True,
by=[a for a in list(set(left.columns.values.tolist()).intersection(right.columns.values.tolist()))
if a not in ['Interface','Operator'] ],
direction='backward', tolerance=tolerance, suffixes=('_left', '_right'))
temp.rename(columns=lambda x: x if not x.endswith('_left') else x[:-len('_left')], inplace=True) # rename left cols, there is more data in it
temp.drop(columns=[x for x in temp.columns.values.tolist() if x.endswith('_right')], inplace=True, axis=1) # drop right cols, not so much data
return temp
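# Minimal illustration of the asof-merge above on synthetic frames (not MONROE
# data); 'time' is in epoch milliseconds, as returned by the queries:
#
#     left = pd.DataFrame({'time': [0, 1000, 2000], 'NodeId': ['1', '1', '1'],
#                          'RTT': [10.0, 12.0, 11.0]})
#     right = pd.DataFrame({'time': [900, 1900], 'NodeId': ['1', '1'],
#                           'RSSI': [-70, -72]})
#     merged = merge_asof_helper(left, right, tolerance=pd.Timedelta('500ms'))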
def getdf(tables, *, nodeid='', where='', limit=100000,
start_time=None, end_time=None,
freq=None, resample='',
interpolate=False,
tolerance=None,
callback=None) -> pd.DataFrame:
"""
Return MONROE data as Pandas DataFrame.
Parameters
----------
tables : str or list of str
Table name(s) to query and merge. Tables can be from the list
        as returned by ``all_tables()``.
nodeid : int or str or list of int or str
A single node ID or a list thereof. If empty, results for all
available nodes are returned.
where : str or list of str
Additional SQL WHERE conditions.
limit : int
Hard-limit on the number of rows requested from the DB for each
NodeId.
start_time : str or datetime or pandas.Timestamp
Query results after start time. Default is set to 14 days before
`end_time` or the min timestamp of `tables`, whichever is later.
end_time : str or datetime or pandas.Timestamp
Query results before end time. Default is set to now or the
max timestamp of `tables`, whichever is sooner.
freq : str, from {'10ms', '1s', '1m', '30m'}
The level of detail to query. Higher precision results in MORE
data. By default, `freq` is set to a sensible (manageable) value
based on query time span.
resample : str
Resampling rule (such as '1h', '2h', '1d', ...) from
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
interpolate : str or bool, default False
Interpolation method supported by ``pandas.DataFrame.interpolate``,
or ``True`` for `linear` interpolation of missing values.
Rows are grouped by NodeId,Iccid before interpolation.
callback : callable
The function to call after each successfully executed query.
Returns
-------
pandas.DataFrame
"""
tables = list(map(_check_table, aslist(tables)))
if not tables:
raise ValueError('Need a table name to fetch')
if where and isinstance(where, str):
where = [where]
where = aslist(where or [], str)
if nodeid:
nodeid = aslist(nodeid, str)
nodedid_where = ['NodeId = {!r}'.format(str(node))
for node in nodeid]
where.append('(' + ' OR '.join(nodedid_where) + ')')
# Sanitize input date and time
start_time, end_time = _check_time(start_time, end_time, tables=tables)
where.append('time >= {!r}'.format(start_time.isoformat()))
where.append('time <= {!r}'.format(end_time.isoformat()))
# Determine correct level-of-detail table
freq = _check_freq(freq, tspan=end_time - start_time, nodeid=nodeid)
def _where_field_name(condition, _identifiers=re.compile(r'\w+').findall):
return _identifiers(condition)[0]
def _query_for_table(table, where, freq, limit, columns=''):
table_columns = {'time'} | set(table._columns())
_where = [cond for cond in where
if _where_field_name(cond) in table_columns]
return _query_str(table, columns=columns, freq=freq, where=_where, limit=limit)
# Construct queries with their applicable "where" parameters
queries = [_query_for_table(table, where, freq, limit)
for table in tables]
# If output will contain column Iccid, ensure it also contains modem.Interface
#if db.modem not in tables and any('Iccid' in table and table!='nettest' for table in tables):
# queries.append(_query_for_table(db.modem, where, freq, limit,
# columns=['Interface', 'Iccid']))
# Construct response data frames; One df per measurement per tag
dfs = []
for results in query_async(queries, callback=callback):
df = _result_set_to_df(results)
if df is not None:
dfs.append(df)
if not dfs:
return pd.DataFrame()
# Join all tables on intersecting columns, namely 'time', 'NodeId', 'IccId', ...
if (tolerance is not None) and (len(tables)>1):
df = reduce(partial(merge_asof_helper, tolerance=tolerance), sorted(dfs, key=lambda x: x.size, reverse=True))
else:
df = reduce(partial(pd.merge, how='outer', copy=False), dfs)
del dfs
# Transform known categorical columns into Categoricals
for col in df:
if col in _CATEGORICAL_COLUMNS:
df[col] = df[col].astype('category')
# Strip trailing '.0' in categoricals constructed from floats (ints upcasted via NaNs)
categories = df[col].cat.categories
if | is_numeric_dtype(categories) | pandas.api.types.is_numeric_dtype |
"""
BiFrame for data synthesis.
"""
import logging
import numpy as np
import pandas as pd
from pandas import Index
from sklearn.model_selection import train_test_split
from ds4ml.dataset import DataSet
from ds4ml.utils import train_and_predict, normalize_range
from ds4ml.metrics import jensen_shannon_divergence, relative_error
logger = logging.getLogger(__name__)
class BiFrame(object):
def __init__(self, first: pd.DataFrame, second: pd.DataFrame,
categories=None):
"""
BiFrame class that contains two data sets, which currently provides
kinds of analysis methods from distribution, correlation, and some
machine learning tasks.
Especially, if the input data sets are source and synthesized dataset,
this class can be used to evaluate the utility and privacy of
synthesized data set.
Parameters
----------
first : {pandas.DataFrame}
first data set (i.e. original dataset)
second : {pandas.DataFrame}
second data set (i.e. synthesized dataset)
categories : list of columns
Column names whose values are categorical.
"""
# distribution
self._dt = {}
# To compare two data set, make sure that they have same columns.
# If not, compare the common part.
common = set(first.columns) & set(second.columns)
if len(common) != len(first.columns) or len(common) != len(second.columns):
logger.info(f"BiFrame constructed on attributes: {common}.")
# left and right data set (ds)
self.first = DataSet(first[common], categories=categories)
self.second = DataSet(second[common], categories=categories)
self._columns = self.first.columns.sort_values().to_list()
# Make sure that two dataset have same domain for categorical
# attributes, and same min, max values for numerical attributes.
for col in self._columns:
# If current column is not categorical, will ignore it.
if not self.first[col].categorical or not self.second[col].categorical:
continue
d1, d2 = self.first[col].domain, self.second[col].domain
if not np.array_equal(d1, d2):
if self.first[col].categorical:
domain = np.unique(np.concatenate((d1, d2)))
else:
domain = [min(d1[0], d2[0]), max(d1[1], d2[1])]
self.first[col].domain = domain
self.second[col].domain = domain
@property
def columns(self):
return self._columns
def err(self):
"""
Return pairwise err (relative error) of columns' distribution.
"""
# merge two frequency counts, and calculate relative difference
df = pd.DataFrame(columns=self._columns, index=['err'])
df.fillna(0)
for col in self._columns:
df.at['err', col] = relative_error(self.first[col].counts(),
self.second[col].counts())
return df
def jsd(self):
"""
Return pairwise JSD (Jensen-Shannon divergence) of columns' distribution.
"""
df = pd.DataFrame(columns=self._columns, index=['jsd'])
df.fillna(0)
for col in self._columns:
df.at['jsd', col] = jensen_shannon_divergence(
self.first[col].counts(), self.second[col].counts())
return df
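    # For reference, the Jensen-Shannon divergence of two distributions P and Q is
    # JSD(P, Q) = 0.5 * KL(P || M) + 0.5 * KL(Q || M) with M = 0.5 * (P + Q); it is
    # symmetric and, with base-2 logarithms, bounded in [0, 1].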
def corr(self):
"""
Return pairwise correlation and dependence measured by mi (mutual
information).
"""
return self.first.mi(), self.second.mi()
def dist(self, column):
"""
Return frequency distribution of one column.
Parameters
----------
column : str
column name, whose distribution will be return
"""
if len(self._dt) == 0:
for c in self._columns:
self._dt[c] = {}
if self.first[c].categorical:
bins = self.first[c].domain
counts1 = self.first[c].counts(bins=bins)
counts2 = self.second[c].counts(bins=bins)
else:
min_, max_ = self.first[c].domain
# the domain from two data set are same;
# extend the domain to human-readable range
bins = normalize_range(min_, max_ + 1)
counts1 = self.first[c].counts(bins=bins)
counts2 = self.second[c].counts(bins=bins)
# Note: index, value of np.histogram has different length
bins = bins[:-1]
self._dt[c]['bins'] = bins
# stack arrays vertically
self._dt[c]['counts'] = np.vstack((counts1, counts2))
return self._dt[column]['bins'], self._dt[column]['counts']
def describe(self):
"""
Give descriptive difference between two data sets, which concluded
relative errors, and jsd divergence.
Return a panda.DataFrame, whose columns are two dataset's columns, and
indexes are a array of metrics, e.g. ['err', 'jsd'].
"""
df1 = self.err()
df2 = self.jsd()
return pd.concat([df1, df2])
def classify(self, label: str, test: pd.DataFrame = None):
"""
Train two svm classifiers based on data sets, and predict class labels
for test data. Return both error rates.
Parameters
----------
label : str
classifier feature, key is one column in left data frame.
It supports two-class and multi-class.
test : {pandas.DataFrame}
test frame, is test data for machine learning algorithms. If it is
not provided, it will split 20% of left data frame as test data.
Returns
-------
        a confusion-matrix DataFrame for each input data set (or a single cross
        matrix when the test data has no class label), indexed and labelled by the
        class values, e.g.::

                      male  female
            male         1       3
            female       2       4
"""
if (not self.first[label].categorical or
not self.second[label].categorical):
raise ValueError(f'Classifier can not run on non-categorical '
f'column: {label}')
from sklearn.metrics import confusion_matrix
def split_feature_label(df: pd.DataFrame):
# TODO need improve sub_cols
sub_cols = [attr for attr in df.columns if attr.startswith(label)]
if len(sub_cols) == 0:
return df, None
is_one_class = len(sub_cols) == 2
if is_one_class:
# For one class, there are two sorted values.
# e.g. ['Yes', 'No'] => [[0, 1],
# [1, 0]]
# Choose second column to represent this attribute.
label_ = sub_cols[1]
return df.drop(sub_cols, axis=1), df[label_]
else:
try:
# merge multiple columns into one column:
# [Name_A, Name_B, ..] => Name
_y = df[sub_cols].apply(lambda x: Index(x).get_loc(1),
axis=1)
return df.drop(sub_cols, axis=1), _y
except KeyError as e:
print(e)
print(sub_cols)
print(df[sub_cols])
# If test dataset is not provided, then split 20% of original dataset
# for testing.
if test is None:
fst_train, test = train_test_split(self.first, test_size=0.2)
snd_train, _ = train_test_split(self.second, test_size=0.2)
else:
fst_train = self.first
snd_train = self.second
# ts = self.first.encode(data=fst_train)
fst_train_x, fst_train_y = split_feature_label(
self.first.encode(data=fst_train))
test_x, test_y = split_feature_label(self.first.encode(data=test))
snd_train_x, snd_train_y = split_feature_label(
self.first.encode(data=snd_train))
# construct svm classifier, and predict on the same test dataset
fst_predict_y = train_and_predict(fst_train_x, fst_train_y, test_x)
snd_predict_y = train_and_predict(snd_train_x, snd_train_y, test_x)
columns = self.first[label].bins
labels = range(len(columns))
# If test dataset has the columns as class label for prediction, return
# two expected scores: (self.first) original dataset's and (self.second)
# anonymized dataset's confusion matrix.
if label in test:
fst_matrix = confusion_matrix(test_y, fst_predict_y, labels=labels)
snd_matrix = confusion_matrix(test_y, snd_predict_y, labels=labels)
# normalize the confusion matrix
# fst_matrix = fst_matrix.astype('float') / fst_matrix.sum(axis=1)
# snd_matrix = snd_matrix.astype('float') / snd_matrix.sum(axis=1)
return (pd.DataFrame(fst_matrix, columns=columns, index=columns),
pd.DataFrame(snd_matrix, columns=columns, index=columns))
# If test dataset does not have the class label for prediction, return
# their predicted values.
else:
matrix = confusion_matrix(fst_predict_y, snd_predict_y,
labels=labels)
return | pd.DataFrame(matrix, columns=columns, index=columns) | pandas.DataFrame |
"""Parallel FB Prophet transformer is a time series transformer that predicts target using FBProphet models."""
"""
This transformer fits one FBProphet model per time group and therefore may take time. Before using this transformer
we suggest you check FBProphet prediction significance by running an experiment with
parallel_prophet_forecast_using_individual_groups. Then enable parallel prophet forecast to get even better predictions."""
"""
In this implementation, Time Group Models are fitted in parallel
The recipe outputs 2 predictors:
- The first one is trained on the average of the target over the time column
- The second one is trained on TopN groups, where TopN is defined by recipe_dict in config.toml.
These groups are those with the highest number of data points.
If TopN is not defined in config.toml set using the toml override in the expert settings,
TopN group defaults to 1. Setting TopN is done with recipe_dict="{'prophet_top_n': 200}"
You may also want to modify the parameters explored line 99 to 103 to fit your needs.
"""
import importlib
from h2oaicore.transformer_utils import CustomTimeSeriesTransformer
from h2oaicore.systemutils import (
small_job_pool, save_obj, load_obj, remove, max_threads, config,
user_dir)
import datatable as dt
import numpy as np
import os
import uuid
import shutil
import random
import importlib
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning
from datetime import datetime
# For more information about FB prophet please visit :
# This parallel implementation is faster than the serial implementation
# available in the repository.
# Standard implementation is therefore disabled.
class suppress_stdout_stderr(object):
def __init__(self):
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
self.save_fds = [os.dup(1), os.dup(2)]
def __enter__(self):
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
for fd in self.null_fds + self.save_fds:
os.close(fd)
# Parallel implementation requires methods being called from different processes
# Global methods support this feature
# We use global methods as a wrapper for member methods of the transformer
def MyParallelProphetTransformer_fit_async(*args, **kwargs):
return MyParallelProphetTransformer._fit_async(*args, **kwargs)
def MyParallelProphetTransformer_transform_async(*args, **kwargs):
return MyParallelProphetTransformer._transform_async(*args, **kwargs)
class MyParallelProphetTransformer(CustomTimeSeriesTransformer):
"""Implementation of the FB Prophet transformer using a pool of processes to fit models in parallel"""
_is_reproducible = True
_binary = False
_multiclass = False
# some package dependencies are best sequential to overcome known issues
froms3 = True
if froms3:
_root_path = "https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes"
_suffix = "-cp38-cp38-linux_x86_64.whl"
_modules_needed_by_name = [
'%s/setuptools_git-1.2%s' % (_root_path, _suffix),
'%s/LunarCalendar-0.0.9%s' % (_root_path, _suffix),
'%s/ephem-3.7.7.1%s' % (_root_path, _suffix),
'%s/cmdstanpy-0.9.5%s' % (_root_path, _suffix),
'%s/pystan-2.19.1.1%s' % (_root_path, _suffix),
'%s/httpstan-4.5.0-cp38-cp38-manylinux_2_27_x86_64.whl' % _root_path,
'%s/fbprophet-0.7.1%s' % (_root_path, _suffix),
]
else:
_modules_needed_by_name = ['holidays==0.11.1', 'convertdate', 'lunarcalendar', 'pystan==2.19.1.1', 'fbprophet==0.7.1']
_included_model_classes = None # ["gblinear"] for strong trends - can extrapolate
_testing_can_skip_failure = False # ensure tested as if shouldn't fail
def __init__(
self,
country_holidays=None,
monthly_seasonality=False,
**kwargs
):
super().__init__(**kwargs)
self.country_holidays = country_holidays
self.monthly_seasonality = monthly_seasonality
@property
def display_name(self):
name = "FBProphet"
if self.country_holidays is not None:
name += "_Holiday_{}".format(self.country_holidays)
if self.monthly_seasonality:
name += "_Month"
return name
@staticmethod
def get_default_properties():
return dict(col_type="time_column", min_cols=1, max_cols=1, relative_importance=1)
@staticmethod
def get_parameter_choices():
return {
"country_holidays": [None, "US"],
"monthly_seasonality": [False, True],
}
@staticmethod
def acceptance_test_timeout():
return 30 # allow for 20 minutes to do acceptance test
@staticmethod
def is_enabled():
return False
@staticmethod
def _fit_async(X_path, grp_hash, tmp_folder, params):
"""
Fits a FB Prophet model for a particular time group
:param X_path: Path to the data used to fit the FB Prophet model
:param grp_hash: Time group identifier
:return: time group identifier and path to the pickled model
"""
np.random.seed(1234)
random.seed(1234)
X = load_obj(X_path)
# Commented for performance, uncomment for debug
# print("prophet - fitting on data of shape: %s for group: %s" % (str(X.shape), grp_hash))
if X.shape[0] < 20:
# print("prophet - small data work-around for group: %s" % grp_hash)
return grp_hash, None
# Import FB Prophet package
mod = importlib.import_module('fbprophet')
Prophet = getattr(mod, "Prophet")
model = Prophet(yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True)
if params["country_holidays"] is not None:
model.add_country_holidays(country_name=params["country_holidays"])
if params["monthly_seasonality"]:
model.add_seasonality(name='monthly', period=30.5, fourier_order=5)
with suppress_stdout_stderr():
model.fit(X[['ds', 'y']])
model_path = os.path.join(tmp_folder, "fbprophet_model" + str(uuid.uuid4()))
save_obj(model, model_path)
remove(X_path) # remove to indicate success
return grp_hash, model_path
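    # Standalone sketch of the Prophet fit performed above, on synthetic daily data
    # (illustrative only, not part of the transformer):
    #
    #     import pandas as pd
    #     from fbprophet import Prophet
    #     df = pd.DataFrame({'ds': pd.date_range('2020-01-01', periods=60, freq='D'),
    #                        'y': range(60)})
    #     m = Prophet(yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True)
    #     m.fit(df)
    #     forecast = m.predict(m.make_future_dataframe(periods=7))[['ds', 'yhat']]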
def _get_n_jobs(self, logger, **kwargs):
try:
if config.fixed_num_folds <= 0:
n_jobs = max(1, int(int(max_threads() / min(config.num_folds, kwargs['max_workers']))))
else:
n_jobs = max(1, int(
int(max_threads() / min(config.fixed_num_folds, config.num_folds, kwargs['max_workers']))))
except KeyError:
loggerinfo(logger, "Prophet No Max Worker in kwargs. Set n_jobs to 1")
n_jobs = 1
return n_jobs if n_jobs > 1 else 1
def _clean_tmp_folder(self, logger, tmp_folder):
try:
shutil.rmtree(tmp_folder)
loggerinfo(logger, "Prophet cleaned up temporary file folder.")
except:
loggerwarning(logger, "Prophet could not delete the temporary file folder.")
def _create_tmp_folder(self, logger):
# Create a temp folder to store files used during multi processing experiment
# This temp folder will be removed at the end of the process
        # Set the default value without context available (required to pass acceptance test)
tmp_folder = os.path.join(user_dir(), "%s_prophet_folder" % uuid.uuid4())
# Make a real tmp folder when experiment is available
if self.context and self.context.experiment_id:
tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_prophet_folder" % uuid.uuid4())
# Now let's try to create that folder
try:
os.mkdir(tmp_folder)
except PermissionError:
            # This should not occur, so log a warning
loggerwarning(logger, "Prophet was denied temp folder creation rights")
tmp_folder = os.path.join(user_dir(), "%s_prophet_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
except FileExistsError:
# We should never be here since temp dir name is expected to be unique
loggerwarning(logger, "Prophet temp folder already exists")
tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_prophet_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
except:
# Revert to temporary file path
tmp_folder = os.path.join(user_dir(), "%s_prophet_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
loggerinfo(logger, "Prophet temp folder {}".format(tmp_folder))
return tmp_folder
def fit(self, X: dt.Frame, y: np.array = None, **kwargs):
"""
Fits FB Prophet models (1 per time group) using historical target values contained in y
Model fitting is distributed over a pool of processes and uses file storage to share the data with workers
:param X: Datatable frame containing the features
:param y: numpy array containing the historical values of the target
:return: self
"""
# Get the logger if it exists
logger = None
if self.context and self.context.experiment_id:
logger = make_experiment_logger(
experiment_id=self.context.experiment_id,
tmp_dir=self.context.tmp_dir,
experiment_tmp_dir=self.context.experiment_tmp_dir,
username=self.context.username,
)
try:
# Add value of prophet_top_n in recipe_dict variable inside of config.toml file
# eg1: recipe_dict="{'prophet_top_n': 200}"
# eg2: recipe_dict="{'prophet_top_n':10}"
self.top_n = config.recipe_dict['prophet_top_n']
except KeyError:
self.top_n = 50
loggerinfo(logger, f"Prophet will use {self.top_n} groups as well as average target data.")
tmp_folder = self._create_tmp_folder(logger)
n_jobs = self._get_n_jobs(logger, **kwargs)
# Reduce X to TGC
tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
X = X[:, self.tgc].to_pandas()
# Fill NaNs or None
X = X.replace([None, np.nan], 0)
# Add target, Label encoder is only used for Classif. which we don't support...
if self.labels is not None:
y = LabelEncoder().fit(self.labels).transform(y)
X['y'] = np.array(y)
self.nan_value = X['y'].mean()
# Change date feature name to match Prophet requirements
X.rename(columns={self.time_column: "ds"}, inplace=True)
# Create a general scale now that will be used for unknown groups at prediction time
# Can we do smarter than that ?
self.general_scaler = MinMaxScaler().fit(X[['y', 'ds']].groupby('ds').median().values)
# Go through groups and standard scale them
if len(tgc_wo_time) > 0:
X_groups = X.groupby(tgc_wo_time)
else:
X_groups = [([None], X)]
self.scalers = {}
scaled_ys = []
print(f'{datetime.now()} Start of group scaling')
for key, X_grp in X_groups:
# Create dict key to store the min max scaler
grp_hash = self.get_hash(key)
# Scale target for current group
self.scalers[grp_hash] = MinMaxScaler()
y_skl = self.scalers[grp_hash].fit_transform(X_grp[['y']].values)
# Put back in a DataFrame to keep track of original index
y_skl_df = pd.DataFrame(y_skl, columns=['y'])
# (0, 'A') (1, 4) (100, 1) (100, 1)
# print(grp_hash, X_grp.shape, y_skl.shape, y_skl_df.shape)
y_skl_df.index = X_grp.index
scaled_ys.append(y_skl_df)
print(f'{datetime.now()} End of group scaling')
# Set target back in original frame but keep original
X['y_orig'] = X['y']
X['y'] = pd.concat(tuple(scaled_ys), axis=0)
# Now Average groups
X_avg = X[['ds', 'y']].groupby('ds').mean().reset_index()
# Send that to Prophet
params = {
"country_holidays": self.country_holidays,
"monthly_seasonality": self.monthly_seasonality
}
mod = importlib.import_module('fbprophet')
Prophet = getattr(mod, "Prophet")
self.model = Prophet(yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True)
if params["country_holidays"] is not None:
self.model.add_country_holidays(country_name=params["country_holidays"])
if params["monthly_seasonality"]:
self.model.add_seasonality(name='monthly', period=30.5, fourier_order=5)
with suppress_stdout_stderr():
self.model.fit(X[['ds', 'y']])
print(f'{datetime.now()} General Model Fitted')
self.top_groups = None
if len(tgc_wo_time) > 0:
if self.top_n > 0:
top_n_grp = X.groupby(tgc_wo_time).size().sort_values().reset_index()[tgc_wo_time].iloc[
-self.top_n:].values
self.top_groups = [
'_'.join(map(str, key))
for key in top_n_grp
]
if self.top_groups:
self.grp_models = {}
self.priors = {}
# Prepare for multi processing
num_tasks = len(self.top_groups)
def processor(out, res):
out[res[0]] = res[1]
pool_to_use = small_job_pool
loggerinfo(logger, f"Prophet will use {n_jobs} workers for fitting.")
loggerinfo(logger, "Prophet parameters holidays {} / monthly {}".format(self.country_holidays,
self.monthly_seasonality))
pool = pool_to_use(
logger=None, processor=processor,
num_tasks=num_tasks, max_workers=n_jobs
)
#
# Fit 1 FB Prophet model per time group columns
nb_groups = len(X_groups)
# Put y back to its unscaled value for top groups
X['y'] = X['y_orig']
for _i_g, (key, X) in enumerate(X_groups):
# Just log where we are in the fitting process
if (_i_g + 1) % max(1, nb_groups // 20) == 0:
loggerinfo(logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))
X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4()))
X = X.reset_index(drop=True)
save_obj(X, X_path)
grp_hash = self.get_hash(key)
if grp_hash not in self.top_groups:
continue
self.priors[grp_hash] = X['y'].mean()
params = {
"country_holidays": self.country_holidays,
"monthly_seasonality": self.monthly_seasonality
}
args = (X_path, grp_hash, tmp_folder, params)
kwargs = {}
pool.submit_tryget(None, MyParallelProphetTransformer_fit_async,
args=args, kwargs=kwargs, out=self.grp_models)
pool.finish()
for k, v in self.grp_models.items():
self.grp_models[k] = load_obj(v) if v is not None else None
remove(v)
self._clean_tmp_folder(logger, tmp_folder)
return self
@staticmethod
def _transform_async(model_path, X_path, nan_value, tmp_folder):
"""
Predicts target for a particular time group
:param model_path: path to the stored model
:param X_path: Path to the data used to fit the FB Prophet model
:param nan_value: Value of target prior, used when no fitted model has been found
:return: self
"""
model = load_obj(model_path)
XX_path = os.path.join(tmp_folder, "fbprophet_XX" + str(uuid.uuid4()))
X = load_obj(X_path)
# Facebook Prophet returns the predictions ordered by time
# So we should keep track of the time order for each group so that
        # predictions are ordered the same as the input frame
# Keep track of the order
order = np.argsort(pd.to_datetime(X["ds"]))
if model is not None:
# Run prophet
yhat = model.predict(X)['yhat'].values
XX = pd.DataFrame(yhat, columns=['yhat'])
else:
XX = pd.DataFrame(np.full((X.shape[0], 1), nan_value), columns=['yhat']) # invalid models
XX.index = X.index[order]
assert XX.shape[1] == 1
save_obj(XX, XX_path)
remove(model_path) # indicates success, no longer need
remove(X_path) # indicates success, no longer need
return XX_path
def transform(self, X: dt.Frame, **kwargs):
"""
Uses fitted models (1 per time group) to predict the target
:param X: Datatable Frame containing the features
:return: FB Prophet predictions
"""
# Get the logger if it exists
logger = None
if self.context and self.context.experiment_id:
logger = make_experiment_logger(
experiment_id=self.context.experiment_id,
tmp_dir=self.context.tmp_dir,
experiment_tmp_dir=self.context.experiment_tmp_dir
)
tmp_folder = self._create_tmp_folder(logger)
n_jobs = self._get_n_jobs(logger, **kwargs)
# Reduce X to TGC
tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
X = X[:, self.tgc].to_pandas()
# Fill NaNs or None
X = X.replace([None, np.nan], 0)
# Change date feature name to match Prophet requirements
X.rename(columns={self.time_column: "ds"}, inplace=True)
# Predict y using unique dates
X_time = X[['ds']].groupby('ds').first().reset_index()
with suppress_stdout_stderr():
y_avg = self.model.predict(X_time)[['ds', 'yhat']]
        # Prophet transforms the date column to datetime, so we need to transform that to merge back
X_time.sort_values('ds', inplace=True)
X_time['yhat'] = y_avg['yhat']
X_time.sort_index(inplace=True)
# Merge back into original frame on 'ds'
# pd.merge wipes the index ... so keep it to provide it again
indices = X.index
X = pd.merge(
left=X,
right=X_time[['ds', 'yhat']],
on='ds',
how='left'
)
X.index = indices
        # Go through groups and recover the scaled target for known groups
if len(tgc_wo_time) > 0:
X_groups = X.groupby(tgc_wo_time)
else:
X_groups = [([None], X)]
inverted_ys = []
for key, X_grp in X_groups:
grp_hash = self.get_hash(key)
# Scale target for current group
if grp_hash in self.scalers.keys():
inverted_y = self.scalers[grp_hash].inverse_transform(X_grp[['yhat']])
else:
inverted_y = self.general_scaler.inverse_transform(X_grp[['yhat']])
# Put back in a DataFrame to keep track of original index
inverted_df = | pd.DataFrame(inverted_y, columns=['yhat']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sep/2020
JSON generated from the sqlite table
@author: github rictom/rede-cnpj
2020-11-25 - If a table already exists, it seems to slow down pandas pd.to_sql.
Do not run CREATE TABLE or create an index on a table that will be created or modified by pandas.
"""
import os, sys, glob
import time, copy, re, string, unicodedata, collections, json, secrets
import pandas as pd, sqlalchemy
from fnmatch import fnmatch
'''
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
'''
import rede_config as config
try:
caminhoDBReceita = config.config['BASE']['base_receita']
except:
sys.exit('o arquivo sqlite não foi localizado. Veja o caminho da base no arquivo de configuracao rede.ini está correto.')
if not caminhoDBReceita: # if there is no Receita db, load a template to avoid errors in the queries
caminhoDBReceita = 'base_cnpj_vazia.db'
caminhoDBReceitaFTS = config.config['BASE'].get('base_receita_fulltext','').strip()
caminhoDBEnderecoNormalizado = config.config['BASE'].get('base_endereco_normalizado', '').strip()
caminhoDBLinks = config.config['BASE'].get('base_links', '').strip()
caminhoDBBaseLocal = config.config['BASE'].get('base_local', '').strip()
#logAtivo = True if config['rede']['logAtivo']=='1' else False # logs queried CNPJs
logAtivo = config.config['ETC'].getboolean('logativo',False) # logs queried CNPJs
# ligacaoSocioFilial = True if config['rede']['ligacaoSocioFilial']=='1' else False
ligacaoSocioFilial = config.config['ETC'].getboolean('ligacao_socio_filial',False) # link partners to branch offices
kLimiteCamada = config.config['ETC'].getint('limite_registros_camada', 10000)
gEngineExecutionOptions = {"sqlite_raw_colnames": True, 'pool_size':1} # pool_size=1 forces a single connection??
#'isolation_level':'AUTOCOMMIT'
class DicionariosCodigosCNPJ():
def __init__(self):
if not caminhoDBReceita:
return
con = sqlalchemy.create_engine(f"sqlite:///{caminhoDBReceita}", execution_options=gEngineExecutionOptions)
#dfaux = pd.read_csv(r"tabelas/tabela-de-qualificacao-do-socio-representante.csv", sep=';')
dfaux = pd.read_sql_table('qualificacao_socio', con, index_col=None )
self.dicQualificacao_socio = pd.Series(dfaux.descricao.values,index=dfaux.codigo).to_dict()
#dfaux = pd.read_csv(r"tabelas/DominiosMotivoSituaoCadastral.csv", sep=';', encoding='latin1', dtype=str)
dfaux = pd.read_sql_table('motivo', con, index_col=None )
self.dicMotivoSituacao = pd.Series(dfaux['descricao'].values, index=dfaux['codigo']).to_dict()
#dfaux = pd.read_excel(r"tabelas/cnae.xlsx", sheet_name='codigo-grupo-classe-descr')
dfaux = pd.read_sql_table('cnae', con, index_col=None )
self.dicCnae = pd.Series(dfaux['descricao'].values, index=dfaux['codigo']).to_dict()
#dfaux = pd.read_csv(r"tabelas/natureza_juridica.csv", sep=';', encoding='latin1', dtype=str)
dfaux = pd.read_sql_table('natureza_juridica', con, index_col=None )
self.dicNaturezaJuridica = pd.Series(dfaux['descricao'].values, index=dfaux['codigo']).to_dict()
self.dicSituacaoCadastral = {'01':'Nula', '02':'Ativa', '03':'Suspensa', '04':'Inapta', '08':'Baixada'}
#self.dicSituacaoCadastral = {'1':'Nula', '2':'Ativa', '3':'Suspensa', '4':'Inapta', '8':'Baixada'}
self.dicPorteEmpresa = {'00':'Não informado', '01':'Micro empresa', '03':'Empresa de pequeno porte', '05':'Demais (Médio ou Grande porte)'}
def tabelaTemp():
    ''' temporary table with a random number to avoid collisions '''
return 'tmp' #'tmp_' + secrets.token_hex(4)
gdic = DicionariosCodigosCNPJ()
dfaux=None
gTableIndex = 0
kCaractereSeparadorLimite = '@'
# decorator to measure a function's execution time
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
print ('%r %2.2f ms' % \
(method.__name__, (te - ts) * 1000))
return result
return timed
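# Example usage of the decorator (sketch):
#
#     @timeit
#     def consulta_lenta():
#         time.sleep(0.5)
#
#     consulta_lenta()   # prints something like "'consulta_lenta' 500.12 ms"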
# def apagaTabelasTemporarias(prefixo_tabela_temporaria='tmp'):
# con = sqlalchemy.create_engine(f"sqlite:///{caminhoDBReceita}", execution_options=gEngineExecutionOptions)
# #con.execute('DROP TABLE if exists tmp_cnpjs')
# tmp = prefixo_tabela_temporaria
# con.execute(f'DROP TABLE if exists {tmp}_cpfpjnomes')
# con.execute(f'DROP TABLE if exists {tmp}_ids')
# con.execute(f'DROP TABLE if exists {tmp}_socios')
# con.execute(f'DROP TABLE if exists {tmp}_busca_nome')
# con = None
def apagaTabelasTemporarias(prefixo_tabela_temporaria='tmp', caminhoDB=caminhoDBReceita):
    '''drops temporary tables. This may fail in a threaded environment??
    if prefixo_tabela_temporaria='', drops ALL tmp_ tables'''
con = sqlalchemy.create_engine(f"sqlite:///{caminhoDB}", execution_options=gEngineExecutionOptions)
insp = sqlalchemy.inspect(con)
tmp = prefixo_tabela_temporaria if prefixo_tabela_temporaria else 'tmp_'
tmp_tabelas = [t for t in insp.get_table_names() if t.startswith(tmp)]
for t in tmp_tabelas:
con.execute(f'Drop table if exists {t}')
con = None
apagaTabelasTemporarias() # drop temporary tables when the module is loaded
def buscaPorNome(nomeIn, limite=10): # the name must be complete. With 'Teste', picks a random item
    '''caminhoDBReceitaFTS is the database with a full-text-search index; it is fast with match but slow with =,
    which is why the query has to be run against caminhoDBReceita when match is not used
    '''
    # remove accents
    # if limite==-1, there was no @N in the name
tmp='tmp'
nomeIn = nomeIn.strip().upper()
    caracteres_pontuacao = set('''!#$%&\'()+,-./:;<=>@[\\]^_`{|}~''') # without * ? "
nomeIn = ''.join(ch for ch in nomeIn if ch not in caracteres_pontuacao)
nomeMatch = ''
try:
limite = int(limite)
except:
limite = 0
    if (not( ('*' in nomeIn) or ('?' in nomeIn) or ('"' in nomeIn))) and limite>0: # if there was an @ but no wildcard character, append *
nomeIn = '*' + nomeIn + '*'
limite = min(limite,100) if limite else 10
#print('limite', limite, nomeIn)
if ('*' in nomeIn) or ('?' in nomeIn) or ('"' in nomeIn):
nomeMatchInicial = nomeIn.strip()
nomeMatch = nomeMatchInicial
        nomeMatchInicial = nomeMatchInicial.replace('"','') # for use with fnmatch
        if nomeMatch.startswith('*'): # sqlite match does not accept * at the beginning
            nomeMatch = nomeMatch[1:].strip()
        if '?' in nomeMatch: # ? is not accepted by sqlite match, but can be used with fnmatch
            nomeMatch = nomeMatch.replace('?', '*')
if caminhoDBReceitaFTS:
confts = sqlalchemy.create_engine(f"sqlite:///{caminhoDBReceitaFTS}", execution_options=gEngineExecutionOptions)
nomeMatch = ''.join(x for x in unicodedata.normalize('NFKD', nomeMatch) if x in string.printable).upper()
#nomeMatch = re.sub(r'[^a-zA-Z0-9_ *""]', '', nomeMatch)
con = sqlalchemy.create_engine(f"sqlite:///{caminhoDBReceita}", execution_options=gEngineExecutionOptions)
nome = ''.join(x for x in unicodedata.normalize('NFKD', nomeIn) if x in string.printable).upper()
#nome = re.sub(r'[^a-zA-Z0-9_ *""]', '', nome)
cjs, cps = set(), set()
cursor = []
if nomeMatch:
        if not caminhoDBReceitaFTS: # since there is no FTS table, skip the match-based query
#con = None
return set(), set()
queryfts = f'''
SELECT DISTINCT nome_socio as nome
FROM socios_search
where nome_socio match :nomeMatch
limit {limite*20}
'''
df_busca_nomesPF = pd.read_sql(queryfts, confts, index_col=None, params={'nomeMatch':nomeMatch})
df_busca_nomesPF.to_sql(f'{tmp}_busca_nomePF', con, if_exists='replace', index=None)
query = f'''
SELECT distinct cnpj_cpf_socio, nome_socio
from {tmp}_busca_nomePF tn
left join socios ts on tn.nome=ts.nome_socio
where cnpj_cpf_socio not null and nome_socio<>"" and length(cnpj_cpf_socio)=11
limit {limite*2}
'''
cursor = con.execute(query)
        # note 26/4/2021: strictly speaking, length(cnpj_cpf_socio)=11 would not be necessary; the problem is that the database has errors in the names of partners that are companies
elif nomeIn=='TESTE':
query = 'select cnpj_cpf_socio, nome_socio from socios where rowid > (abs(random()) % (select (select max(rowid) from socios)+1)) LIMIT 1;'
cursor = con.execute(query)
else:
query = f'''
SELECT distinct cnpj_cpf_socio, nome_socio
FROM socios
where nome_socio=:nome
limit {limite}
'''
cursor = con.execute(query, {'nome':nome})
#nomeMatch = nomeMatch.replace('"','')
# print('query', query)
contagemRegistros = 0
for r in cursor.fetchall(): #con.execute(query):
if contagemRegistros>=limite:
break
if nomeMatch:
if not fnmatch(r.nome_socio.strip(), nomeMatchInicial):
continue
if len(r.cnpj_cpf_socio)==14:
cjs.add(r.cnpj_cpf_socio)
elif len(r.cnpj_cpf_socio)==11:
cps.add((r.cnpj_cpf_socio, r.nome_socio))
contagemRegistros += 1
if nome=='TESTE':
print('##TESTE com identificador aleatorio:', cjs, cps)
con = None
return cjs, cps
con.execute(f'drop table if exists {tmp}_busca_nomePF')
    # fetch the CNPJs
cursor = []
if nomeMatch:
queryfts = f'''
SELECT DISTINCT razao_social as nome
FROM empresas_search
where razao_social match :nomeMatch
limit {limite*20}
'''
df_busca_nomesPJ = | pd.read_sql(queryfts, confts, index_col=None, params={'nomeMatch':nomeMatch}) | pandas.read_sql |
import numpy as np
import pandas as pd
import sqlite3
class DataTransformation:
"""Performs data loading and data transformation
"""
def __init__(self, url:str) -> None:
try:
self.__data = pd.read_csv(url, sep=";")
self.__transform()
except Exception as error:
            raise RuntimeError("There was a problem loading the file") from error
def __transform(self) -> None:
        column_names = {"fecha_nacimiento": "birth_date", "fecha_vencimiento": "due_date",
                        "deuda": "due_balance", "direccion": "address",
                        "correo": "email", "estatus_contacto": "status",
                        "prioridad": "priority", "telefono": "phone"}
try:
self.__data = self.__data.rename(column_names, axis=1)
#self.__data[['birth_date','due_date']] = self.__data[['birth_date','due_date']].apply(pd.to_datetime, format="%Y-%m-%d")
self.__data['due_date'] = pd.to_datetime(self.__data['due_date'])
self.__data['birth_date'] = pd.to_datetime(self.__data['birth_date'])
today = | pd.to_datetime("today") | pandas.to_datetime |
"""
Functions for writing a directory for iModulonDB webpages
"""
import logging
import os
import re
from itertools import chain
from zipfile import ZipFile
import numpy as np
import pandas as pd
from matplotlib.colors import to_hex
from tqdm.notebook import tqdm
from pymodulon.plotting import _broken_line, _get_fit, _solid_line
##################
# User Functions #
##################
def imodulondb_compatibility(model, inplace=False, tfcomplex_to_gene=None):
"""
Checks for all issues and missing information prior to exporting to iModulonDB.
If inplace = True, modifies the model (not recommended for main model variables).
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
IcaData object to check
inplace: bool, optional
If true, modifies the model to prepare for export.
Not recommended for use with your main model variable.
tfcomplex_to_gene: dict, optional
dictionary pointing complex TRN entries to matching gene names in the gene
table (ex: {"FlhDC":"flhD"})
Returns
-------
table_issues: pd.DataFrame
Each row corresponds to an issue with one of the main class elements.
Columns:
* Table: which table or other variable the issue is in
* Missing Column: the column of the Table with the issue (not case
sensitive; capitalization is ignored).
* Solution: Unless "CRITICAL" is in this cell, the site behavior if the
issue remained is described here.
tf_issues: pd.DataFrame
Each row corresponds to a regulator that is used in the imodulon_table.
Columns:
* in_trn: whether the regulator is in the model.trn. Regulators not
in the TRN will be ignored in the site's histograms and gene tables.
* has_link: whether the regulator has a link in tf_links. If not, no
link to external regulator databases will be shown.
* has_gene: whether the regulator can be matched to a gene in the model.
If this is false, then there will be no regulator scatter plot on the
site. You can link TF complexes to one of their genes using the
tfcomplex_to_gene input.
missing_g_links: pd.Series
The genes on this list don't have links in the gene_links. Their gene pages
for these genes will not display links.
missing_DOIs: pd.Series
The samples listed here don't have DOIs in the sample_table. Clicking on their
associated bars in the activity plots will not link to relevant papers.
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
table_issues = pd.DataFrame(columns=["Table", "Missing Column", "Solution"])
# Check for X
if model.X is None:
table_issues = table_issues.append(
{
"Table": "X",
"Missing Column": "all",
"Solution": "CRITICAL. Add the expression matrix"
" so that gene pages can be generated.",
},
ignore_index=True,
)
logging.warning("Critical issue: No X matrix")
# Check for updated imodulondb table
default_imdb_table = {
"organism": "New Organism",
"dataset": "New Dataset",
"strain": "Unspecified",
"publication_name": "Unpublished Study",
"publication_link": "",
"gene_link_db": "External Database",
"organism_folder": "new_organism",
"dataset_folder": "new_dataset",
}
for k, v in default_imdb_table.items():
if model.imodulondb_table[k] == v:
if k == "publication_link":
solution = "The publication name will not be a hyperlink."
else:
solution = 'The default, "{}", will be used.'.format(v)
table_issues = table_issues.append(
{
"Table": "iModulonDB",
"Missing Column": k,
"Solution": solution,
},
ignore_index=True,
)
# Check the gene table
gene_table_cols = {
"gene_name": "Locus tags (gene_table.index) will be used.",
"gene_product": "Locus tags (gene_table.index) will be used.",
"cog": "COG info will not display & the gene scatter plot will"
" not have color.",
"start": "The x axis of the scatter plot will be a numerical"
" value instead of a genome location.",
"operon": "Operon info will not display.",
"regulator": "Regulator info will not display. If you have a"
" TRN, add it to the model to auto-generate this column.",
}
gene_table_lower = {i.lower(): i for i in model.gene_table.columns}
for col in gene_table_cols.keys():
if not (col in gene_table_lower.keys()):
table_issues = table_issues.append(
{
"Table": "Gene",
"Missing Column": col,
"Solution": gene_table_cols[col],
},
ignore_index=True,
)
if (col in ["gene_name", "gene_product"]) & inplace:
model.gene_table[col] = model.gene_table.index
elif inplace:
model.gene_table = model.gene_table.rename(
{gene_table_lower[col]: col}, axis=1
)
# check for missing gene links
missing_g_links = []
for g in model.M.index:
if (
not (isinstance(model.gene_links[g], str))
or model.gene_links[g].strip() == ""
):
missing_g_links.append(g)
missing_g_links = pd.Series(missing_g_links, name="missing_gene_links")
# check for errors in the n_replicates column of the sample table
if inplace & ("n_replicates" in model.sample_table.columns):
try:
imdb_activity_bar_df(model, model.imodulon_table.index[0])
except ValueError:
logging.warning(
"Error detected in sample_table['n_replicates']."
" Deleting that column. It will be auto-regenerated."
" You can prevent this from happening in the future"
" using generate_n_replicates_column(model)"
)
model.sample_table = model.sample_table.drop("n_replicates", 1)
# check the sample table
sample_table_cols = {
"project": "This is a CRITICAL column defining the largest"
" grouping of samples. Vertical bars in the activity plot"
" will separate projects.",
"condition": "This is an CRITICAL column defining the smallest"
" grouping of samples. Biological replicates must have matching"
" projects and conditions, and they will appear as single bars"
" with averaged activities.",
"sample": "The sample_table.index will be used. Each entry must be"
' unique. Note that the preferred syntax is "project__condition__#."',
"n_replicates": "This column will be generated for you.",
"doi": "Clicking on activity plot bars will not link to relevant"
" papers for the samples.",
}
sample_table_lower = {i.lower(): i for i in model.sample_table.columns}
if model.sample_table.columns.str.lower().duplicated().any():
logging.warning(
"Critical issue: Duplicated column names"
" (case insensitive) in sample_table"
)
table_issues = table_issues.append(
{
"Table": "Sample",
"Missing Column": "N/A - Duplicated Columns Exist",
"Solution": "Column names (case insensitive) should not "
"be duplicated. Pay special attention the 'sample' column.",
},
ignore_index=True,
)
for col in sample_table_cols.keys():
if not (col in sample_table_lower.keys()):
if (col == "sample") & (model.sample_table.index.name == "sample"):
continue
if col in ["project", "condition"]:
logging.warning(
"Critical issue: No {} column in sample_table.".format(col)
)
table_issues = table_issues.append(
{
"Table": "Sample",
"Missing Column": col,
"Solution": sample_table_cols[col],
},
ignore_index=True,
)
if (col == "n_replicates") & inplace:
generate_n_replicates_column(model)
elif inplace:
model.sample_table = model.sample_table.rename(
{sample_table_lower[col]: col}, axis=1
)
# check for missing DOIs
if "doi" in sample_table_lower.keys():
if inplace:
doi_idx = "doi"
else:
doi_idx = sample_table_lower["doi"]
missing_DOIs = model.sample_table.index[
model.sample_table[doi_idx].isna()
].copy()
missing_DOIs.name = "missing_DOIs"
else:
missing_DOIs = model.sample_table.index.copy()
missing_DOIs.name = "missing_DOIs"
# check the iModulon table columns
try:
model.imodulon_table.index.astype(int)
im_idx = "int"
except TypeError:
im_idx = "str"
iM_table_cols = {
"name": "imodulon_table.index will be used.",
"regulator": "The regulator details will be left blank.",
"function": "The function will be blank in the dataset table and"
' "Uncharacterized" in the iModulon dashboard',
"category": 'The categories will be filled in as "Uncharacterized".',
"n_genes": "This column will be computed for you.",
"precision": "This column will be left blank.",
"recall": "This column will be left blank.",
"exp_var": "This column will be left blank.",
}
iM_table_lower = {i.lower(): i for i in model.imodulon_table.columns}
for col in iM_table_cols.keys():
if not (col in iM_table_lower.keys()):
table_issues = table_issues.append(
{
"Table": "iModulon",
"Missing Column": col,
"Solution": iM_table_cols[col],
},
ignore_index=True,
)
if inplace:
if col == "name":
if im_idx == "int":
model.imodulon_table["name"] = [
"iModulon {}".format(i) for i in model.imodulon_table.index
]
else:
model.imodulon_table["name"] = model.imodulon_table.index
elif col == "n_genes":
model.imodulon_table["n_genes"] = model.M_binarized.sum().astype(
int
)
else:
model.imodulon_table[col] = np.nan
elif inplace:
model.imodulon_table = model.imodulon_table.rename(
{iM_table_lower[col]: col}, axis=1
)
if inplace:
if im_idx == "str":
model.rename_imodulons(
dict(zip(model.imodulon_names, range(len(model.imodulon_names))))
)
for idx, tf in zip(model.imodulon_table.index, model.imodulon_table.regulator):
try:
model.imodulon_table.loc[idx, "regulator_readable"] = (
model.imodulon_table.regulator[idx]
.replace("/", " or ")
.replace("+", " and ")
)
except AttributeError:
model.imodulon_table.loc[
idx, "regulator_readable"
] = model.imodulon_table.regulator[idx]
# check the TRN
cols = ["in_trn", "has_link", "has_gene"]
tf_issues = pd.DataFrame(columns=cols)
if "regulator" in iM_table_lower.keys():
if inplace:
reg_idx = "regulator"
else:
reg_idx = iM_table_lower["regulator"]
for tf_string in model.imodulon_table[reg_idx]:
_, no_trn = parse_tf_string(model, tf_string)
_, no_link = tf_with_links(model, tf_string)
_, no_gene = get_tfs_to_scatter(model, tf_string, tfcomplex_to_gene)
tfs_to_add = set(no_trn + no_link + no_gene)
for tf in tfs_to_add:
row = dict(zip(cols, [True] * 3))
for col, tf_set in zip(cols, [no_trn, no_link, no_gene]):
if tf in tf_set:
row[col] = False
tf_issues.loc[tf] = row
return table_issues, tf_issues, missing_g_links, missing_DOIs
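# Example usage (sketch; `ica_data` stands for an existing IcaData object):
#
#     table_issues, tf_issues, missing_g_links, missing_DOIs = imodulondb_compatibility(
#         ica_data, inplace=False, tfcomplex_to_gene={"FlhDC": "flhD"})
#     print(table_issues)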
def imodulondb_export(
model,
path=".",
cat_order=None,
tfcomplex_to_gene=None,
skip_iMs=False,
skip_genes=False,
):
"""
Generates the iModulonDB page for the data and exports to the path.
If certain columns are unavailable but can be filled in automatically,
they will be.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object to export
path : str, optional
Path to iModulonDB main hosting folder (default = ".")
cat_order : list, optional
List of categories in the imodulon_table, ordered as you would
like them to appear in the dataset table (default = None)
tfcomplex_to_gene : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
skip_iMs : bool, optional
If this is True, do not output iModulon files (to save time)
skip_genes : bool, optional
If this is True, do not output gene files (to save time)
Returns
-------
None: None
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
model1 = model.copy()
imodulondb_compatibility(model1, True, tfcomplex_to_gene=tfcomplex_to_gene)
print("Writing main site files...")
folder = imodulondb_main_site_files(model1, path, cat_order=cat_order)
print("Done writing main site files. Writing plot files...")
if not (skip_iMs and skip_genes):
print(
"Two progress bars will appear below. The second will take "
"significantly longer than the first."
)
if not (skip_iMs):
print("Writing iModulon page files (1/2)")
imdb_generate_im_files(model1, folder, "start", tfcomplex_to_gene)
if not (skip_genes):
print("Writing Gene page files (2/2)")
imdb_generate_gene_files(model1, folder)
print(
"Complete! (Organism = {}; Dataset = {})".format(
model1.imodulondb_table["organism_folder"],
model1.imodulondb_table["dataset_folder"],
)
)
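# Example usage (sketch; the output path and category order are illustrative):
#
#     imodulondb_export(ica_data, path="./iModulonDB",
#                       cat_order=["Carbon Metabolism", "Stress Response"],
#                       tfcomplex_to_gene={"FlhDC": "flhD"})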
###############################
# Major Outputs (Called Once) #
###############################
def imdb_dataset_table(model):
"""
Converts the model's imodulondb_table into dataset metadata
for the gray box on the left side of the dataset page
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
An IcaData object
Returns
-------
res: ~pandas.Series
A series of formatted metadata
"""
res = pd.Series(dtype=str)
if model.imodulondb_table["organism"] == "New Organism":
org_short = ""
else:
org_parts = model.imodulondb_table["organism"].split(" ")
org_short = org_parts[0][0].upper() + ". " + org_parts[1].lower()
org_short = "<i>" + org_short + "</i>"
res["Title"] = org_short + " " + model.imodulondb_table["dataset"]
res["Organism"] = "<i>" + model.imodulondb_table["organism"] + "</i>"
res["Strain"] = model.imodulondb_table["strain"]
if model.imodulondb_table["publication_link"] == "":
res["Publication"] = model.imodulondb_table["publication_name"]
else:
pub_str = '<a href="' + model.imodulondb_table["publication_link"]
pub_str += '">' + model.imodulondb_table["publication_name"] + "</a>"
res["Publication"] = pub_str
res["Number of Samples"] = model.A.shape[1]
if ("project" in model.sample_table.columns) and (
"condition" in model.sample_table.columns
):
num_conds = len(model.sample_table.groupby(["condition", "project"]))
else:
num_conds = "Unknown"
res["Number of Unique Conditions"] = num_conds
res["Number of Genes"] = model.M.shape[0]
res["Number of iModulons"] = model.M.shape[1]
return res
def imdb_iM_table(imodulon_table, cat_order=None):
"""
    Reformats the iModulon table according to the layout expected by iModulonDB
Parameters
----------
imodulon_table : ~pandas.DataFrame
Table formatted similar to IcaData.imodulon_table
cat_order : list, optional
List of categories in imodulon_table.category, ordered as desired
Returns
-------
im_table: ~pandas.DataFrame
New iModulon table with the columns expected by iModulonDB
"""
im_table = imodulon_table[
[
"name",
"regulator_readable",
"function",
"category",
"n_genes",
"exp_var",
"precision",
"recall",
]
].copy()
im_table.index.name = "k"
im_table.category = im_table.category.fillna("Uncharacterized")
if cat_order is not None:
cat_dict = {val: i for i, val in enumerate(cat_order)}
im_table.loc[:, "category_num"] = [
cat_dict[im_table.category[k]] for k in im_table.index
]
else:
try:
im_table.loc[:, "category_num"] = imodulon_table["new_idx"]
except KeyError:
im_table.loc[:, "category_num"] = im_table.index
return im_table
def imdb_gene_presence(model):
"""
Generates the two versions of the gene presence file, one as a binary
matrix, and one as a DataFrame
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
An IcaData object
Returns
-------
mbin: ~pandas.DataFrame
Binarized M matrix
mbin_list: ~pandas.DataFrame
Table mapping genes to iModulons
"""
    mbin = model.M_binarized.astype(bool)
    # build the long-format table without repeated DataFrame.append calls,
    # which are deprecated (and removed in pandas >= 2.0)
    records = [
        {"iModulon": k, "Gene": g} for k in mbin.columns for g in mbin.index[mbin[k]]
    ]
    mbin_list = pd.DataFrame(records, columns=["iModulon", "Gene"])
    return mbin, mbin_list
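# Example of the two shapes produced above (hypothetical values): mbin is a
# genes-by-iModulons boolean matrix, while mbin_list is its "long" form with
# one (iModulon, Gene) row per True entry, e.g.
#
#     iModulon   Gene
#   0        0  b0001
#   1        0  b0019
#   2        1  b0002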
def imodulondb_main_site_files(
model, path_prefix=".", rewrite_annotations=True, cat_order=None
):
"""
Generates all parts of the site that do not require large iteration loops
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Main folder for iModulonDB files (default = ".")
rewrite_annotations : bool, optional
Set to False if the gene_table and trn are unchanged (default = True)
cat_order : list, optional
list of categories in data.imodulon_table.category, ordered as you want
them to appear on the dataset page (default = None)
Returns
-------
main_folder: str
Dataset folder, for use as the path_prefix in imdb_generate_im_files()
"""
organism = model.imodulondb_table["organism_folder"]
dataset = model.imodulondb_table["dataset_folder"]
# create new folders
organism_folder = os.path.join(path_prefix, "organisms", organism)
if not (os.path.isdir(organism_folder)):
os.makedirs(organism_folder)
annot_folder = os.path.join(organism_folder, "annotation")
if not (os.path.isdir(annot_folder)):
rewrite_annotations = True
os.makedirs(annot_folder)
# save annotations
if rewrite_annotations:
# make the folder if necessary
gene_folder = os.path.join(annot_folder, "gene_files")
if not (os.path.isdir(gene_folder)):
os.makedirs(gene_folder)
# add files to the folder
model.gene_table.to_csv(os.path.join(gene_folder, "gene_info.csv"))
try:
model.trn.to_csv(os.path.join(gene_folder, "trn.csv"))
except FileNotFoundError:
pass
# zip the folder
old_cwd = os.getcwd()
os.chdir(gene_folder)
with ZipFile("../gene_files.zip", "w") as z:
z.write("gene_info.csv")
z.write("trn.csv")
os.chdir(old_cwd)
main_folder = os.path.join(organism_folder, dataset)
if not (os.path.isdir(main_folder)):
os.makedirs(main_folder)
# save the metadata files in the main folder
dataset_meta = imdb_dataset_table(model)
dataset_meta.to_csv(os.path.join(main_folder, "dataset_meta.csv"))
# num_ims - used so that the 'next iModulon' button doesn't overflow
    with open(main_folder + "/num_ims.txt", "w") as f:
        f.write(str(model.M.shape[1]))
# save the dataset files in the data folder
data_folder = os.path.join(main_folder, "data_files")
if not (os.path.isdir(data_folder)):
os.makedirs(data_folder)
model.X.to_csv(os.path.join(data_folder, "log_tpm.csv"))
model.A.to_csv(os.path.join(data_folder, "A.csv"))
model.M.to_csv(os.path.join(data_folder, "M.csv"))
im_table = imdb_iM_table(model.imodulon_table, cat_order)
im_table.to_csv(os.path.join(data_folder, "iM_table.csv"))
model.sample_table.to_csv(os.path.join(data_folder, "sample_table.csv"))
mbin, mbin_list = imdb_gene_presence(model)
mbin.to_csv(os.path.join(data_folder, "gene_presence_matrix.csv"))
mbin_list.to_csv(os.path.join(data_folder, "gene_presence_list.csv"))
pd.Series(model.thresholds).to_csv(os.path.join(data_folder, "M_thresholds.csv"))
# zip the data folder
old_cwd = os.getcwd()
os.chdir(data_folder)
with ZipFile("../data_files.zip", "w") as z:
z.write("log_tpm.csv")
z.write("A.csv")
z.write("M.csv")
z.write("iM_table.csv")
z.write("sample_table.csv")
z.write("gene_presence_list.csv")
z.write("gene_presence_matrix.csv")
z.write("M_thresholds.csv")
os.chdir(old_cwd)
# make iModulons searchable
enrich_df = model.imodulon_table.copy()
enrich_df["component"] = enrich_df.index
enrich_df = enrich_df[["component", "name", "regulator", "function"]]
enrich_df = enrich_df.rename({"function": "Function"}, axis=1)
try:
enrich_df = enrich_df.sort_values(by="name").fillna(value="N/A")
except TypeError:
enrich_df["name"] = enrich_df["name"].astype(str)
enrich_df = enrich_df.sort_values(by="name").fillna(value="N/A")
if not (os.path.isdir(main_folder + "/iModulon_files")):
os.makedirs(main_folder + "/iModulon_files")
enrich_df.to_json(main_folder + "/iModulon_files/im_list.json", orient="records")
# make genes searchable
gene_df = model.gene_table.copy()
gene_df = gene_df[gene_df.index.isin(model.X.index)]
gene_df["gene_id"] = gene_df.index
gene_df = gene_df[["gene_name", "gene_id", "gene_product"]]
gene_df = gene_df.sort_values(by="gene_name").fillna(value="not available")
if not (os.path.isdir(main_folder + "/gene_page_files")):
os.makedirs(main_folder + "/gene_page_files")
gene_df.to_json(main_folder + "/gene_page_files/gene_list.json", orient="records")
# make the html
html = '<div class="panel">\n'
html += ' <div class="panel-header">\n'
html += ' <h2 class="mb-0">\n'
html += ' <button class="btn btn-link collapsed organism" type="button"'
html += ' data-toggle="collapse" data-target="#new_org" aria-expanded="false"'
html += ' aria-controls="new_org">\n <i>'
html += model.imodulondb_table["organism"]
html += "</i>\n </button>\n </h2>\n </div>\n"
html += ' <div id="new_org" class="collapse" aria-labelledby="headingThree"'
html += ' data-parent="#organismAccordion">\n'
html += ' <div class="panel-body">\n'
html += ' <ul class="nav navbar-dark flex-column">\n'
html += ' <li class="nav-item dataset">\n'
html += ' <a class="nav-link active" href="dataset.html?organism='
html += organism
html += "&dataset="
html += dataset
html += '"><i class="fas fa-angle-right pr-2"></i>'
html += model.imodulondb_table["dataset"]
html += "\n </a>\n </li>\n"
html += " </ul>\n </div>\n </div>\n</div>"
    with open(main_folder + "/html_for_splash.html", "w") as f:
        f.write(html)
return main_folder
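# Minimal end-to-end sketch (assumes a prepared IcaData object named `ica_data`;
# the folder layout is the one created by the functions in this module):
#
#   >>> main_folder = imodulondb_main_site_files(ica_data, path_prefix="site")
#   >>> imdb_generate_im_files(ica_data, main_folder)
#   >>> imdb_generate_gene_files(ica_data, main_folder)
#
# which populates site/organisms/<organism>/<dataset>/ with data_files/,
# iModulon_files/ and gene_page_files/ subfolders.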
def imdb_generate_im_files(
model, path_prefix=".", gene_scatter_x="start", tfcomplex_to_gene=None
):
"""
Generates all files for all iModulons in data
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Dataset folder in which to store the files (default = ".")
gene_scatter_x : str
        Column from the gene table that specifies what to use on the
X-axis of the gene scatter plot (default = "start")
tfcomplex_to_gene : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
for k in tqdm(model.imodulon_table.index):
make_im_directory(model, k, path_prefix, gene_scatter_x, tfcomplex_to_gene)
def imdb_generate_gene_files(model, path_prefix="."):
"""
    Generates all files for all genes in the IcaData object
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Dataset folder in which to store the files (default = ".")
Returns
-------
None
"""
for g in tqdm(model.M.index):
make_gene_directory(model, g, path_prefix)
###################################################
# iModulon-Related Outputs (and Helper Functions) #
###################################################
# Gene Table
def parse_tf_string(model, tf_str, verbose=False):
"""
Returns a list of relevant tfs from a string. Will ignore TFs not in the
trn file.
iModulonDB helper function.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
tf_str : str
String of tfs joined by '+' and '/' operators
verbose : bool, optional
        Whether or not to print outputs
Returns
-------
    tfs: list
        List of relevant TFs found in the TRN
    bad_tfs: list
        List of TFs from the string that are missing from the TRN
"""
if not (type(tf_str) == str):
return [], []
if tf_str == "":
return [], []
tf_str = tf_str.replace("[", "").replace("]", "")
tfs = re.split("[+/]", tf_str)
# Check if there is an issue, just remove the issues for now.
bad_tfs = []
for tf in tfs:
tf = tf.strip()
if tf not in model.trn.regulator.unique():
if verbose:
print("Regulator not in TRN:", tf)
print(
"To remedy this, add rows to the TRN for each gene associated "
"with this regulator. Otherwise, it will be ignored in the gene"
"tables and histograms."
)
bad_tfs.append(tf)
tfs = [t.strip() for t in list(set(tfs) - set(bad_tfs))]
bad_tfs = list(set(bad_tfs))
return tfs, bad_tfs
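# Parsing sketch: for a hypothetical TRN containing ArcA and Fnr but not "Foo",
# a string such as "ArcA+Fnr/Foo" is split on the '+' and '/' operators into
# ["ArcA", "Fnr", "Foo"], and the call returns (["ArcA", "Fnr"], ["Foo"]) --
# the second list holding regulators absent from model.trn.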
def imdb_gene_table_df(model, k):
"""
Creates the gene table dataframe for iModulonDB
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
Returns
-------
res: ~pandas.DataFrame
DataFrame of the gene table that is compatible with iModulonDB
"""
# get TFs and large table
row = model.imodulon_table.loc[k]
tfs, _ = parse_tf_string(model, row.regulator)
res = model.view_imodulon(k)
# sort
columns = []
for c in [
"gene_weight",
"gene_name",
"old_locus_tag",
"gene_product",
"cog",
"operon",
"regulator",
]:
if c in res.columns:
columns.append(c)
res = res[columns]
res = res.sort_values("gene_weight", ascending=False)
# add TFs
for tf in tfs:
reg_genes = model.trn.gene_id[model.trn.regulator == tf].values
res[tf] = [i in reg_genes for i in res.index]
# add links
res["link"] = [model.gene_links[g] for g in res.index]
# clean up
res.index.name = "locus"
return res
# Gene Histogram
def _component_DF(model, k, tfs=None):
"""
Helper function for imdb_gene_hist_df
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
tfs : list
List of TFs (default = None)
Returns
-------
gene_table: ~pandas.DataFrame
Gene table for the iModulon
"""
df = pd.DataFrame(model.M[k].sort_values())
df.columns = ["gene_weight"]
if "gene_product" in model.gene_table.columns:
df["gene_product"] = model.gene_table["gene_product"]
if "gene_name" in model.gene_table.columns:
df["gene_name"] = model.gene_table["gene_name"]
if "operon" in model.gene_table.columns:
df["operon"] = model.gene_table["operon"]
if "length" in model.gene_table.columns:
df["length"] = model.gene_table.length
if "regulator" in model.gene_table.columns:
df["regulator"] = model.gene_table.regulator.fillna("")
if tfs is not None:
for tf in tfs:
df[tf] = [tf in regs.split(",") for regs in df["regulator"]]
return df.sort_values("gene_weight")
def _tf_combo_string(row):
"""
Creates a formatted string for the histogram legends. Helper function for
imdb_gene_hist_df.
Parameters
----------
row : ~pandas.Series
Boolean series indexed by TFs for a given gene
Returns
-------
str
A string formatted for display (i.e. "Regulated by ...")
"""
if row.sum() == 0:
return "unreg"
if row.sum() == 1:
return row.index[row][0]
if row.sum() == 2:
return " and ".join(row.index[row])
else:
return ", ".join(row.index[row][:-1]) + ", and " + row.index[row][-1]
def _sort_tf_strings(tfs, unique_elts):
"""
Sorts TF strings for the legend of the histogram. Helper function for
imdb_gene_hist_df.
Parameters
----------
tfs : list[str]
Sequence of TFs in the desired order
unique_elts : list[str]
All combination strings made by _tf_combo_string
Returns
-------
list[str]
A sorted list of combination strings that have a consistent ordering
"""
# unreg always goes first
unique_elts.remove("unreg")
sorted_elts = ["unreg"]
# then the individual TFs
for tf in tfs:
if tf in unique_elts:
sorted_elts.append(tf)
unique_elts.remove(tf)
# then pairs
pairs = [i for i in unique_elts if "," not in i]
for i in tfs:
for j in tfs:
name = i + " and " + j
if name in pairs:
sorted_elts.append(name)
unique_elts.remove(name)
# then longer combos, which won't be sorted for now
return sorted_elts + unique_elts
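# Ordering sketch (hypothetical inputs): with tfs = ["ArcA", "Fnr"] and
# unique_elts = ["unreg", "Fnr", "ArcA and Fnr", "ArcA"], this returns
# ["unreg", "ArcA", "Fnr", "ArcA and Fnr"] -- "unreg" first, then single TFs in
# the given order, then pairs, then any longer combinations unsorted.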
def imdb_gene_hist_df(model, k, bins=20, tol=0.001):
"""
Creates the gene histogram for an iModulon
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
bins : int
Number of bins in the histogram (default = 20)
tol : float
Distance to threshold for deciding if a bar is in the iModulon
(default = .001)
Returns
-------
gene_hist_table: ~pandas.DataFrame
A dataframe for producing the histogram that is compatible with
iModulonDB
"""
# get TFs
row = model.imodulon_table.loc[k]
if not (type(row.regulator) == str):
tfs = []
else:
tfs, _ = parse_tf_string(model, row.regulator)
tfs = list(set(tfs))
# get genes
DF_gene = _component_DF(model, k, tfs)
# add a tf_combo column
if len(tfs) == 0:
DF_gene["tf_combos"] = ["unreg"] * DF_gene.shape[0]
else:
tf_bools = DF_gene[tfs]
DF_gene["tf_combos"] = [
_tf_combo_string(tf_bools.loc[g]) for g in tf_bools.index
]
# get the list of tf combos in the correct order
tf_combo_order = _sort_tf_strings(tfs, list(DF_gene.tf_combos.unique()))
# compute bins
xmin = min(min(DF_gene.gene_weight), -model.thresholds[k])
xmax = max(max(DF_gene.gene_weight), model.thresholds[k])
width = (
2
* model.thresholds[k]
/ max((np.floor(2 * model.thresholds[k] * bins / (xmax - xmin) - 1)), 1)
)
xmin = -model.thresholds[k] - width * np.ceil((-model.thresholds[k] - xmin) / width)
xmax = xmin + width * bins
# column headers: bin middles
columns = np.arange(xmin + width / 2, xmax + width / 2, width)[:bins]
index = ["thresh"] + tf_combo_order + [i + "_genes" for i in tf_combo_order]
res = pd.DataFrame(index=index, columns=columns)
# row 0: threshold indices and number of unique tf combos
thresh1 = -model.thresholds[k]
thresh2 = model.thresholds[k]
num_combos = len(tf_combo_order)
res.loc["thresh"] = [thresh1, thresh2, num_combos] + [np.nan] * (len(columns) - 3)
# next set of rows: heights of bars
for r in tf_combo_order:
res.loc[r] = np.histogram(
DF_gene.gene_weight[DF_gene.tf_combos == r], bins, (xmin, xmax)
)[0]
# last set of rows: gene names
for b_mid in columns:
# get the bin bounds
b_lower = b_mid - width / 2
b_upper = b_lower + width
for r in tf_combo_order:
# get the genes for this regulator and bin
genes = DF_gene.index[
(DF_gene.tf_combos == r)
& (DF_gene.gene_weight < b_upper)
& (DF_gene.gene_weight > b_lower)
]
# use the gene names, and get them with num2name (more robust)
genes = [model.num2name(g) for g in genes]
res.loc[r, b_mid] = len(genes)
gene_list = np.array2string(np.array(genes), separator=" ")
# don't list unregulated genes unless they are in the i-modulon
if r == "unreg":
if (b_lower + tol >= model.thresholds[k]) or (
b_upper - tol <= -model.thresholds[k]
):
res.loc[r + "_genes", b_mid] = gene_list
else:
res.loc[r + "_genes", b_mid] = "[]"
else:
res.loc[r + "_genes", b_mid] = gene_list
return res
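# Worked example of the bin arithmetic above (numbers are illustrative only):
# with model.thresholds[k] = 0.2, gene weights spanning [-1, 1] and bins = 20,
# width = 0.4 / floor(0.4 * 20 / 2 - 1) = 0.4 / 3 ~= 0.133, so the interval
# [-0.2, 0.2] covers exactly three bins and both thresholds land on bin edges;
# xmin is then pushed down to -1.0 so the histogram still covers every gene.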
# Gene Scatter Plot
def _gene_color_dict(model):
"""
Helper function to match genes to colors based on COG. Used by
imdb_gene_scatter_df.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
Returns
-------
dict
Dictionary associating gene names to colors
"""
try:
gene_cogs = model.gene_table.cog.to_dict()
except AttributeError:
return {k: "dodgerblue" for k in model.gene_table.index}
try:
return {k: model.cog_colors[v] for k, v in gene_cogs.items()}
except (KeyError, AttributeError):
# previously, this would call the setter using:
# data.cog_colors = None
cogs = sorted(model.gene_table.cog.unique())
model.cog_colors = dict(
zip(
cogs,
[
"red",
"pink",
"y",
"orchid",
"mediumvioletred",
"green",
"lightgray",
"lightgreen",
"slategray",
"blue",
"saddlebrown",
"turquoise",
"lightskyblue",
"c",
"skyblue",
"lightblue",
"fuchsia",
"dodgerblue",
"lime",
"sandybrown",
"black",
"goldenrod",
"chocolate",
"orange",
],
)
)
return {k: model.cog_colors[v] for k, v in gene_cogs.items()}
def imdb_gene_scatter_df(model, k, gene_scatter_x="start"):
"""
Generates a dataframe for the gene scatter plot in iModulonDB
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
gene_scatter_x : str
Determines x-axis of the scatterplot
Returns
-------
res: ~pandas.DataFrame
A dataframe for producing the scatterplot
"""
columns = ["name", "x", "y", "cog", "color", "link"]
res = pd.DataFrame(columns=columns, index=model.M.index)
res.index.name = "locus"
cutoff = model.thresholds[k]
# x&y scatterplot points - do alternatives later
if gene_scatter_x == "start":
try:
res.x = model.gene_table.loc[res.index, "start"]
except KeyError:
gene_scatter_x = "gene number"
res.x = range(len(res.index))
else:
raise ValueError("Only 'start' is supported as a gene_scatter_x input.")
# res.x = data.X[base_conds].mean(axis=1)
res.y = model.M[k]
# add other data
res.name = [model.num2name(i) for i in res.index]
try:
res.cog = model.gene_table.cog[res.index]
except AttributeError:
res.cog = "Unknown"
gene_colors = _gene_color_dict(model)
res.color = [to_hex(gene_colors[gene]) for gene in res.index]
# if the gene is in the iModulon, it is clickable
in_im = res.index[res.y.abs() > cutoff]
for g in in_im:
res.loc[g, "link"] = model.gene_links[g]
# add a row to store the threshold
cutoff_row = pd.DataFrame(
[gene_scatter_x, cutoff] + [np.nan] * 4, columns=["meta"], index=columns
).T
res = pd.concat([cutoff_row, res])
return res
# Activity Bar Graph
def generate_n_replicates_column(model):
"""
Generates the "n_replicates" column of the sample_table for iModulonDB.
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
IcaData object. Will overwrite the existing column if it exists.
Returns
-------
None: None
"""
try:
for name, group in model.sample_table.groupby(["project", "condition"]):
model.sample_table.loc[group.index, "n_replicates"] = group.shape[0]
except KeyError:
logging.warning(
"Unable to write n_replicates column. Add"
" project & condition columns (required)."
)
def imdb_activity_bar_df(model, k):
"""
Generates a dataframe for the activity bar graph of iModulon k
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
Returns
-------
res: ~pandas.DataFrame
A dataframe for producing the activity bar graph for iModulonDB
"""
samp_table = model.sample_table.reset_index(drop=True)
# get the row of A
A_k = model.A.loc[k]
A_k = A_k.rename(dict(zip(A_k.index, samp_table.index)))
# initialize the dataframe
max_replicates = int(samp_table["n_replicates"].max())
columns = ["A_avg", "A_std", "n"] + list(
chain(
*[
["rep{}_idx".format(i), "rep{}_A".format(i)]
for i in range(1, max_replicates + 1)
]
)
)
res = pd.DataFrame(columns=columns)
# iterate through conditions and fill in rows
for cond, group in samp_table.groupby(["project", "condition"], sort=False):
# get condition name and A values
cond_name = cond[0] + "__" + cond[1] # project__cond
vals = A_k[group.index]
# compute statistics
new_row = [vals.mean(), vals.std(), len(vals)]
# fill in individual samples (indices and values)
for idx in group.index:
new_row += [idx, vals[idx]]
new_row += [np.nan] * ((max_replicates - len(vals)) * 2)
res.loc[cond_name] = new_row
# clean up
res.index.name = "condition"
res = res.reset_index()
return res
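# Row-format sketch (hypothetical project/condition names): each output row is
# one condition, e.g.
#
#   condition          A_avg  A_std  n  rep1_idx  rep1_A  rep2_idx  rep2_A
#   myproj__glucose     1.82   0.11  2        14    1.74        15    1.90
#
# Unused rep*_ columns (conditions with fewer replicates than the maximum) are
# left as NaN.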
# Regulon Venn Diagram
def _parse_regulon_string(model, s):
"""
The Bacillus microarray dataset uses [] to create unusually complicated
TF strings. This function parses those, as a helper to _get_reg_genes for
imdb_regulon_venn_df.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
s : str
TF string
Returns
-------
res: set
Set of genes regulated by this string
"""
res = set()
if not (isinstance(s, str)):
return res
if "/" in s:
union = s.split("] / [")
union[0] = union[0][1:]
union[-1] = union[-1][:-1]
else:
union = [s]
for r in union:
if "+" in r:
intersection = r.split(" + ")
genes = set(model.trn.gene_id[model.trn.regulator == intersection[0]])
for i in intersection[1:]:
genes = genes.intersection(
set(model.trn.gene_id[model.trn.regulator == i])
)
else:
genes = set(model.trn.gene_id[model.trn.regulator == r])
res = res.union(genes)
return res
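# Parsing sketch: a hypothetical string "[SigA + Fnr] / [ArcA]" is first split
# on "] / [" into the union terms "SigA + Fnr" and "ArcA"; the "+" term becomes
# the intersection of the SigA and Fnr regulons, and the final result is that
# intersection unioned with the ArcA regulon.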
def _get_reg_genes(model, tf):
"""
Finds the set of genes regulated by the boolean combination of regulators
in a TF string
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
tf : str
string of TFs separated by +, /, and/or []
Returns
-------
reg_genes: set[str]
Set of regulated genes
"""
# the Bacillus tf strings use '[]' to make complicated boolean combinations
if "[" in tf:
reg_genes = _parse_regulon_string(model, tf)
# other datasets can use this simpler code
else:
tf = tf.strip()
if "+" in tf:
reg_list = []
for tfx in tf.split("+"):
tfx = tfx.strip()
reg_list.append(
set(model.trn[model.trn.regulator == tfx].gene_id.unique())
)
reg_genes = set.intersection(*reg_list)
elif "/" in tf:
reg_genes = set(
model.trn[
model.trn.regulator.isin([t.strip() for t in tf.split("/")])
].gene_id.unique()
)
else:
reg_genes = set(model.trn[model.trn.regulator == tf].gene_id.unique())
# return result
return reg_genes
def imdb_regulon_venn_df(model, k):
"""
Generates a dataframe for the regulon venn diagram of iModulon k. Returns
None if there is no diagram to draw
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
Returns
-------
res: ~pandas.DataFrame
A DataFrame for producing the venn diagram in iModulonDB
"""
row = model.imodulon_table.loc[k]
tf = row["regulator"]
if not (type(tf) == str):
return None
if tf.strip() == "":
return None
# Take care of and/or enrichments
reg_genes = _get_reg_genes(model, tf)
# Get component genes
comp_genes = set(model.view_imodulon(k).index)
both_genes = set(reg_genes & comp_genes)
# Get gene and operon counts
reg_gene_count = len(reg_genes)
comp_gene_count = len(comp_genes)
both_gene_count = len(both_genes)
# Add adjustments for venn plotting (add '2' for alternates)
reg_gene_count2 = 0
comp_gene_count2 = 0
both_gene_count2 = 0
if reg_genes == comp_genes:
reg_gene_count = 0
comp_gene_count = 0
both_gene_count = 0
reg_gene_count2 = 0
comp_gene_count2 = 0
both_gene_count2 = len(reg_genes)
elif all(item in comp_genes for item in reg_genes):
reg_gene_count = 0
both_gene_count = 0
reg_gene_count2 = len(reg_genes)
comp_gene_count2 = 0
both_gene_count2 = 0
elif all(item in reg_genes for item in comp_genes):
comp_gene_count = 0
both_gene_count = 0
reg_gene_count2 = 0
comp_gene_count2 = len(comp_genes)
both_gene_count2 = 0
res = pd.DataFrame(
[
tf,
reg_gene_count,
comp_gene_count,
both_gene_count,
reg_gene_count2,
comp_gene_count2,
both_gene_count2,
],
columns=["Value"],
index=[
"TF",
"reg_genes",
"comp_genes",
"both_genes",
"reg_genes2",
"comp_genes2",
"both_genes2",
],
)
# gene lists
just_reg = reg_genes - both_genes
just_comp = comp_genes - both_genes
for i, l in zip(
["reg_genes", "comp_genes", "both_genes"], [just_reg, just_comp, both_genes]
):
gene_list = np.array([model.num2name(g) for g in l])
gene_list = np.array2string(gene_list, separator=" ")
res.loc[i, "list"] = gene_list
return res
# Regulon Scatter Plot
def get_tfs_to_scatter(model, tf_string, tfcomplex_to_genename=None, verbose=False):
"""
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
tf_string : str or ~numpy.nan
String of TFs, or np.nan
tfcomplex_to_genename : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
verbose : bool
Show verbose output (default: False)
Returns
-------
    res: list
        List of regulator gene names with expression profiles in the dataset
    bad_res: list
        List of TFs with no associated expression profile
"""
# hard-coded TF names
# should just modify TRN/gene info so everything matches but ok
if tfcomplex_to_genename is None:
tfcomplex_to_genename = {}
rename_tfs = {
"csqR": "yihW",
"hprR": "yedW",
"thi-box": "Thi-box",
"FlhDC": "flhD",
"RcsAB": "rcsB",
"ntrC": "glnG",
"gutR": "srlR",
"IHF": "ihfB",
"H-NS": "hns",
"GadE-RcsB": "gadE",
}
for k, v in tfcomplex_to_genename.items():
rename_tfs[k] = v
res = []
bad_res = []
if type(tf_string) == str:
tf_string = tf_string.replace("[", "").replace("]", "")
tfs = re.split("[+/]", tf_string)
for tf in tfs:
tf = tf.strip()
if tf in rename_tfs.keys():
tf = rename_tfs[tf]
try:
b_num = model.name2num(tf)
if b_num in model.X.index:
res.append(tf)
except ValueError:
bad_res.append(tf)
if verbose:
print("TF has no associated expression profile:", tf)
print("If {} is not a gene, this behavior is expected.".format(tf))
print(
"If it is a gene, use consistent naming"
" between the TRN and gene_table."
)
res = list(set(res)) # remove duplicates
bad_res = list(set(bad_res))
return res, bad_res
def imdb_regulon_scatter_df(model, k, tfcomplex_to_genename=None):
"""
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
tfcomplex_to_genename : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
Returns
-------
res: ~pandas.DataFrame
A dataframe for producing the regulon scatter plots in iModulonDB
"""
if tfcomplex_to_genename is None:
tfcomplex_to_genename = {}
row = model.imodulon_table.loc[k]
tfs, _ = get_tfs_to_scatter(model, row.regulator, tfcomplex_to_genename)
if len(tfs) == 0:
return None
# coordinates for points
coord = pd.DataFrame(columns=["A"] + tfs, index=model.A.columns)
coord["A"] = model.A.loc[k]
# params for fit line
param_df = pd.DataFrame(
columns=["A"] + tfs, index=["R2", "xmin", "xmid", "xmax", "ystart", "yend"]
)
# fill in dfs
for tf in tfs:
# coordinates
coord[tf] = model.X.loc[model.name2num(tf)]
xlim = np.array([coord[tf].min(), coord[tf].max()])
# fit line
params, r2 = _get_fit(coord[tf], coord["A"])
if len(params) == 2: # unbroken
y = _solid_line(xlim, *params)
out = [xlim[0], np.nan, xlim[1], y[0], y[1]]
else: # broken
xvals = np.array([xlim[0], params[2], xlim[1]])
y = _broken_line(xvals, *params)
out = [xlim[0], params[2], xlim[1], y[0], y[2]]
param_df[tf] = [r2] + out
res = pd.concat([param_df, coord], axis=0)
res = res.sort_values("R2", axis=1, ascending=False)
    # keep the activity column first, followed by the TFs sorted by R2
    res = res[pd.Index(["A"]).append(res.columns.drop("A"))]
    return res
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
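    # Behavioural note (illustrative, not an original pandas comment): passing
    # value=None explicitly, as above, inserts real None objects, whereas
    # calling ser.replace("") with no value falls back to pad-filling -- the
    # behaviour exercised by test_replace_gh5319 below -- so the two spellings
    # are not interchangeable.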
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
            assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
        expected = pd.Series([1, 2, 3])
        tm.assert_series_equal(expected, result)
"""handler functions and classes for ix workflow"""
import json
import os
from importlib import import_module
from pathlib import Path
from typing import Mapping, Optional
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import parquet
import pdr
from pdr_tests.utilz.ix_utilz import (
get_product_row,
console_and_log,
stamp,
download_product_row,
verbose_temp_download,
assemble_urls,
flip_ends_with,
read_and_hash,
record_comparison,
)
from pdr_tests.utilz.dev_utilz import FakeStopwatch, Stopwatch
# ############ INDEX & TESTING CLASSES #############
class MissingHashError(ValueError):
pass
class DatasetDefinition:
"""
base class for this module. defines and encapsulates references / directory
structure for the ix workflow.
"""
def __init__(self, name):
rules_module = import_module(f"definitions.{name}.selection_rules")
self.rules = getattr(rules_module, "file_information")
self.def_path = Path(rules_module.__file__).parent
self.data_path = Path(self.def_path.parent.parent, "data", name)
self.browse_path = Path(self.def_path.parent.parent, "browse", name)
self.temp_path = Path(Path.home(), "pdr_test_temp")
self.dataset = name
def complete_list_path(self, product_type):
return Path(
self.def_path, "sample_lists", f"{product_type}_complete.parquet"
)
def subset_list_path(self, product_type):
return Path(
self.def_path, "sample_lists", f"{product_type}_subset.csv"
)
def shared_list_path(self):
return Path(
self.def_path, "shared_lists", f"{self.dataset}_shared.csv"
)
def product_data_path(self, product_type):
return Path(self.data_path, product_type)
def temp_data_path(self, product_type):
return Path(self.temp_path, product_type)
def index_path(self, product_type):
return Path(self.def_path, f"{product_type}.csv")
def test_path(self, product_type):
return Path(self.def_path, f"{product_type}_test.csv")
def product_browse_path(self, product_type):
return Path(self.browse_path, product_type)
def data_mkdirs(self, product_type):
os.makedirs(self.product_data_path(product_type), exist_ok=True)
os.makedirs(self.temp_data_path(product_type), exist_ok=True)
def across_all_types(self, method_name, *args, **kwargs):
output = []
for product_type in self.rules:
result = getattr(self, method_name)(product_type, *args, **kwargs)
output.append(result)
return output
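# Directory-layout sketch implied by the path helpers above (names are
# illustrative): for a dataset called "cassini_iss", definitions/cassini_iss/
# holds the selection rules, sample_lists/ and the per-product-type index CSVs,
# while data/cassini_iss/<product_type>/ and browse/cassini_iss/<product_type>/
# receive downloaded products and browse output respectively.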
class ProductPicker(DatasetDefinition):
def __init__(self, name):
super().__init__(name)
# TODO, maybe: it is possibly time-inefficient to iterate through
# the manifest a bunch of times, although it's very
# memory-efficient. idk. it might not be as bad as i think,
# though, unless we did clever segmentation of results on each group.
def make_product_list(self, product_type: str):
"""
construct and write full-set parquet file for a given product type.
"""
if product_type is None:
return self.across_all_types("make_product_list")
os.makedirs(
self.complete_list_path(product_type).parent, exist_ok=True
)
print(f"Making product list for {product_type} ...... ", end="")
self.complete_list_path(product_type).unlink(missing_ok=True)
manifest = self.rules[product_type]["manifest"]
manifest_parquet = parquet.ParquetFile(manifest)
results = []
for group_ix in range(manifest_parquet.num_row_groups):
results.append(
self.filter_table(
product_type, manifest_parquet.read_row_group(group_ix)
)
)
products = pa.concat_tables(results)
size_gb = round(pa.compute.sum(products["size"]).as_py() / 10**9, 2)
# TODO: this estimate is bad for products with several large files
print(f"{len(products)} products found, {size_gb} estimated GB")
parquet.write_table(products, self.complete_list_path(product_type))
def filter_table(self, product_type: str, table: pa.Table) -> pa.Table:
"""
construct list of filter functions -- methods of pa.compute --
based on selection rules for dataset and product type. apply them
to select examples of specified product type from manifest table.
"""
info = self.rules[product_type]
filts = []
if "url_must_contain" in info.keys():
for string in info["url_must_contain"]:
filts.append((pa.compute.match_substring, "url", string))
if "fn_ends_with" in info.keys():
ends = info["fn_ends_with"]
assert len(ends) == 1, "only one filename ending may be specified"
filts.append((flip_ends_with, "filename", ends[0]))
if "fn_must_contain" in info.keys():
for string in info["fn_must_contain"]:
filts.append((pa.compute.match_substring, "filename", string))
if "fn_regex" in info.keys():
for string in info["fn_regex"]:
filts.append(
(pa.compute.match_substring_regex, "filename", string)
)
if len(filts) == 0:
raise ValueError("filters must be specified for product types.")
for method, column, substring in filts:
table = table.filter(method(table[column], substring))
return table
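    # Selection-rule sketch (hypothetical rules entry): a product type defined as
    #   {"manifest": ..., "url_must_contain": ["cassini/"], "fn_ends_with": [".img"]}
    # becomes two filters -- a match_substring on the url column and a suffix
    # match on the filename column -- that are ANDed together row by row.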
def random_picks(
self,
product_type: str,
subset_size: int = 200,
max_gb: float = 8,
):
"""
randomly select a subset of products from a given product type; write
this subset to disk as a csv file. optionally specify subset size and
cap file size in GB.
"""
if product_type is None:
return self.across_all_types("random_picks", subset_size, max_gb)
print(
f"picking test subset for {self.dataset} {product_type} ...... ",
end="",
)
max_bytes = max_gb * 10**9
complete = self.complete_list_path(product_type)
subset = self.subset_list_path(product_type)
total = parquet.read_metadata(complete).num_rows
if total < subset_size:
# pick them all (if not too big)
small_enough = parquet.read_table(
complete, filters=[("size", "<", max_bytes)]
)
print(
f"{total} products; {len(small_enough)}/{total} < {max_gb} GB "
f"cutoff; taking all {len(small_enough)}"
)
small_enough.to_pandas().to_csv(subset, index=None)
return
sizes = parquet.read_table(complete, columns=["size"])[
"size"
].to_numpy()
small_enough_ix = np.nonzero(sizes < max_bytes)[0]
pick_ix = np.sort(np.random.choice(small_enough_ix, subset_size))
print(
f"{total} products; {len(small_enough_ix)}/{total} < {max_gb} GB "
f"cutoff; randomly picking {subset_size}"
)
# TODO: this is not very clever
ix_base, picks = 0, []
complete_parquet = parquet.ParquetFile(complete)
for group_ix in range(complete_parquet.num_row_groups):
group = complete_parquet.read_row_group(group_ix)
available = [
ix - ix_base for ix in pick_ix if ix - ix_base < len(group)
]
if len(available) != 0:
picks.append(group.take(available))
pa.concat_tables(picks).to_pandas().to_csv(subset, index=None)
class IndexMaker(DatasetDefinition):
def __init__(self, name):
super().__init__(name)
def get_labels(self, product_type: str, dry_run: bool = False):
if product_type is None:
return self.across_all_types("get_labels", dry_run)
self.data_mkdirs(product_type)
dry = "" if dry_run is False else "(dry run)"
print(f"Downloading labels for {self.dataset} {product_type} {dry}")
subset = self.load_subset_table(product_type)
if dry_run is True:
return
for url in subset["url"]:
verbose_temp_download(
self.product_data_path(product_type),
self.temp_data_path(product_type),
url,
)
def load_subset_table(self, product_type: str, verbose: bool = True):
subset = pd.read_csv(self.subset_list_path(product_type))
detached = self.rules[product_type]["label"] != "A"
if detached:
# TODO: PDS4
label_rule = self.rules[product_type]["label"]
if isinstance(label_rule, tuple):
try:
regex = self.rules[product_type]["regex"]
print(
f"regex has been set to {regex} for {product_type} "
f"label replacement rules."
)
except KeyError:
regex = False
subset["filename"] = subset["filename"].str.replace(
*label_rule, regex
)
else:
subset["filename"] = subset["filename"].map(
lambda fn: Path(fn).with_suffix(".LBL").name
)
subset["url"] = assemble_urls(subset)
subset["path"] = subset["filename"].map(
lambda fn: Path(self.product_data_path(product_type), fn)
)
if verbose is True:
present = subset["path"].map(lambda path: path.exists())
if detached:
size_message = "detached labels; "
else:
size = round(subset.loc[~present]["size"].sum() / 10**9, 1)
size_message = f"attached labels; total download ~{size} GB"
print(
f"{len(subset)} labels; "
f"{len(subset.loc[present])} already in system; {size_message}"
)
return subset
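    # Filename-rewrite sketch (hypothetical rule): with a detached-label rule of
    # "label": (".img", ".lbl"), the tuple branch above turns "foo.img" into
    # "foo.lbl" via str.replace, while the default branch simply swaps the
    # original suffix for ".LBL".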
def write_subset_index(self, product_type: str):
if product_type is None:
return self.across_all_types("write_subset_index")
print(f"Writing index for {self.dataset} {product_type}")
subset = self.load_subset_table(product_type, verbose=False)
product_rows = []
for ix, product in subset.iterrows():
product_row = get_product_row(product["path"], product["url"])
print(product_row)
product_rows.append(product_row)
# noinspection PyTypeChecker
pd.DataFrame(product_rows).to_csv(
self.index_path(product_type), index=None
)
print(f"Wrote index for {self.dataset} {product_type} subset.")
class IndexDownloader(DatasetDefinition):
def __init__(self, name):
super().__init__(name)
rules_module = import_module(f"definitions.{name}.selection_rules")
self.skip_files = ()
if hasattr(rules_module, "SKIP_FILES"):
self.skip_files = getattr(rules_module, "SKIP_FILES")
def download_index(self, product_type: str, get_test: bool = False):
if product_type is None:
return self.across_all_types("download_index", get_test)
ptype = "subset files" if get_test is False else "test files"
console_and_log(f"Downloading {self.dataset} {product_type} {ptype}.")
data_path = self.product_data_path(product_type)
temp_path = self.temp_data_path(product_type)
self.data_mkdirs(product_type)
if self.shared_list_path().exists():
print(f"Checking shared files for {self.dataset}.")
shared_index = pd.read_csv(self.shared_list_path())
for ix, row in shared_index.iterrows():
verbose_temp_download(
data_path, temp_path, row["url"], skip_quietly=False
)
if get_test is True:
index = pd.read_csv(self.test_path(product_type))
else:
index = pd.read_csv(self.index_path(product_type))
for ix, row in index.iterrows():
console_and_log(f"Downloading product id: {row['product_id']}")
download_product_row(data_path, temp_path, row, self.skip_files)
class ProductChecker(DatasetDefinition):
def __init__(self, name):
super().__init__(name)
hash_rows, log_rows = {}, {}
def dump_test_paths(self, product_type):
if product_type is None:
return self.across_all_types("dump_test_paths")
index = pd.read_csv(self.test_path(product_type))
data_path = self.product_data_path(product_type)
return [
str(Path(data_path, product["label_file"]))
for ix, product in index.iterrows()
]
def compare_test_hashes(
self,
product_type,
regen=False,
write=True,
debug=True,
dump_browse=False,
dump_kwargs=None,
):
"""
generate and / or compare test hashes for a specified mission and
dataset. writes new hashes into test index files if no hashes are
present.
regenerate: if True, skip hash comparisons and instead overwrite any
hashes found in test index files
write: if False, do a 'dry run' -- don't write anything besides logs
regardless of other settings/results
debug: should we open products in debug mode?
dump_browse: if True, also write browse products
dump_kwargs: kwargs for browse writer
"""
if product_type is None:
return self.across_all_types(
"compare_test_hashes",
regen,
write,
debug,
dump_browse,
dump_kwargs,
)
console_and_log(f"Hashing {self.dataset} {product_type}.")
index = pd.read_csv(self.test_path(product_type))
if "hash" not in index.columns:
console_and_log(f"no hashes found for {product_type}, writing new")
elif regen is True:
console_and_log(f"regenerate=True passed, overwriting hashes")
compare = not ((regen is True) or ("hash" not in index.columns))
# compare/overwrite are redundant rn, but presumably we might want
# different logic in the future.
overwrite = (regen is True) or ("hash" not in index.columns)
data_path = self.product_data_path(product_type)
self.hash_rows, self.log_rows = {}, {}
for ix, product in index.iterrows():
console_and_log(f"testing {product['product_id']}")
data, self.hash_rows[ix], self.log_rows[ix] = test_product(
product, Path(data_path, product["label_file"]), compare, debug
)
if (dump_browse is True) and (data is not None):
console_and_log(
f"dumping browse products for {product['product_id']}"
)
self.dump_test_browse(data, product_type, dump_kwargs)
console_and_log(
f"dumped browse products for {product['product_id']}"
)
if (overwrite is True) and (write is False):
console_and_log("write=False passed, not updating hashes in csv")
elif overwrite is True:
index["hash"] = | pd.Series(self.hash_rows) | pandas.Series |
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
        key = ("w", "b", "j")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
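    # Quick illustration (not from the original test file): fill_value only
    # fills the holes created by the reshape itself, e.g.
    #   >>> ser = Series([1, 2], index=MultiIndex.from_tuples([("x", "a"), ("y", "b")]))
    #   >>> ser.unstack(fill_value=0)
    #      a  b
    #   x  1  0
    #   y  0  2
    # NaNs already present in the data are left untouched.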
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with non-category results in a ValueError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
        # GH 32624: Error when using a lot of indices to unstack.
        # The error occurred only if a lot of indices were used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
        # GH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
        # GH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = | DataFrame(vals, columns=cols, index=idx) | pandas.DataFrame |
#
# Copyright (c) 2022 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Wrapper around Facebook's popular Prophet model for time series forecasting.
"""
import logging
import os
from typing import Iterable, List, Tuple, Union
try:
import prophet
except ImportError as e:
    err_msg = (
        "Try installing Merlion with optional dependencies using `pip install salesforce-merlion[prophet]` or "
        "`pip install salesforce-merlion[all]`"
    )
raise ImportError(str(e) + ". " + err_msg)
import numpy as np
import pandas as pd
from merlion.models.automl.seasonality import SeasonalityModel
from merlion.models.forecast.base import ForecasterBase, ForecasterConfig
from merlion.utils import TimeSeries, UnivariateTimeSeries, to_pd_datetime, to_timestamp
logger = logging.getLogger(__name__)
class _suppress_stdout_stderr(object):
"""
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
Source: https://github.com/facebook/prophet/issues/223#issuecomment-326455744
"""
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = [os.dup(1), os.dup(2)]
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
for fd in self.null_fds + self.save_fds:
os.close(fd)
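# Usage sketch (illustrative, not part of the original module): because the
# class above is an ordinary context manager, any chatty fitting call can be
# wrapped in a ``with`` block; output written to stdout/stderr inside the
# block -- including output from compiled extensions -- is discarded.
def _demo_suppress_output():
    with _suppress_stdout_stderr():
        print("this line is swallowed by the redirected file descriptors")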
class ProphetConfig(ForecasterConfig):
"""
Configuration class for Facebook's `Prophet` model, as described by
`Taylor & Letham, 2017 <https://peerj.com/preprints/3190/>`__.
"""
def __init__(
self,
max_forecast_steps: int = None,
target_seq_index: int = None,
yearly_seasonality: Union[bool, int] = "auto",
weekly_seasonality: Union[bool, int] = "auto",
daily_seasonality: Union[bool, int] = "auto",
seasonality_mode="additive",
holidays=None,
uncertainty_samples: int = 100,
**kwargs,
):
"""
:param max_forecast_steps: Max # of steps we would like to forecast for.
:param target_seq_index: The index of the univariate (amongst all
univariates in a general multivariate time series) whose value we
would like to forecast.
:param yearly_seasonality: If bool, whether to enable yearly seasonality.
By default, it is activated if there are >= 2 years of history, but
deactivated otherwise. If int, this is the number of Fourier series
components used to model the seasonality (default = 10).
:param weekly_seasonality: If bool, whether to enable weekly seasonality.
By default, it is activated if there are >= 2 weeks of history, but
deactivated otherwise. If int, this is the number of Fourier series
components used to model the seasonality (default = 3).
:param daily_seasonality: If bool, whether to enable daily seasonality.
By default, it is activated if there are >= 2 days of history, but
deactivated otherwise. If int, this is the number of Fourier series
components used to model the seasonality (default = 4).
:param seasonality_mode: 'additive' (default) or 'multiplicative'.
:param holidays: pd.DataFrame with columns holiday (string) and ds (date type)
and optionally columns lower_window and upper_window which specify a
range of days around the date to be included as holidays.
lower_window=-2 will include 2 days prior to the date as holidays. Also
optionally can have a column prior_scale specifying the prior scale for
that holiday. Can also be a dict corresponding to the desired pd.DataFrame.
:param uncertainty_samples: The number of posterior samples to draw in
order to calibrate the anomaly scores.
"""
super().__init__(max_forecast_steps=max_forecast_steps, target_seq_index=target_seq_index, **kwargs)
self.yearly_seasonality = yearly_seasonality
self.weekly_seasonality = weekly_seasonality
self.daily_seasonality = daily_seasonality
self.seasonality_mode = seasonality_mode
self.uncertainty_samples = uncertainty_samples
self.holidays = holidays
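# Illustrative sketch (values and dates are made up, not taken from the original
# code): a config with weekly seasonality forced on and a one-row holiday table
# passed as a dict with the ``holiday``/``ds`` columns described above.
def _demo_prophet_config():
    return ProphetConfig(
        max_forecast_steps=24,
        weekly_seasonality=True,
        seasonality_mode="additive",
        holidays={"holiday": ["new_year"], "ds": ["2022-01-01"]},
    )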
class Prophet(SeasonalityModel, ForecasterBase):
"""
Facebook's model for time series forecasting. See docs for `ProphetConfig`
    and `Taylor & Letham, 2017 <https://peerj.com/preprints/3190/>`__ for more details.
"""
config_class = ProphetConfig
def __init__(self, config: ProphetConfig):
super().__init__(config)
self.model = prophet.Prophet(
yearly_seasonality=self.yearly_seasonality,
weekly_seasonality=self.weekly_seasonality,
daily_seasonality=self.daily_seasonality,
seasonality_mode=self.seasonality_mode,
uncertainty_samples=self.uncertainty_samples,
holidays=None if self.holidays is None else pd.DataFrame(self.holidays),
)
self.last_forecast_time_stamps_full = None
self.last_forecast_time_stamps = None
self.resid_samples = None
@property
def require_even_sampling(self) -> bool:
return False
def __getstate__(self):
stan_backend = self.model.stan_backend
if hasattr(stan_backend, "logger"):
model_logger = self.model.stan_backend.logger
self.model.stan_backend.logger = None
state_dict = super().__getstate__()
if hasattr(stan_backend, "logger"):
self.model.stan_backend.logger = model_logger
return state_dict
@property
def yearly_seasonality(self):
return self.config.yearly_seasonality
@property
def weekly_seasonality(self):
return self.config.weekly_seasonality
@property
def daily_seasonality(self):
return self.config.daily_seasonality
@property
def add_seasonality(self):
return self.config.add_seasonality
@property
def seasonality_mode(self):
return self.config.seasonality_mode
@property
def holidays(self):
return self.config.holidays
@property
def uncertainty_samples(self):
return self.config.uncertainty_samples
def set_seasonality(self, theta, train_data: UnivariateTimeSeries):
theta = [theta] if not isinstance(theta, Iterable) else theta
dt = train_data.index[1] - train_data.index[0]
for p in theta:
if p > 1:
period = p * dt.total_seconds() / 86400
logger.info(f"Add seasonality {str(p)} ({p * dt})")
self.model.add_seasonality(name=f"extra_season_{p}", period=period, fourier_order=p)
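    # Worked example of the conversion above (a sketch, not from the original
    # code): with hourly data dt.total_seconds() == 3600, so a detected period
    # of p = 24 samples becomes 24 * 3600 / 86400 = 1.0, i.e. a one-day
    # seasonality in Prophet's day-based units.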
def _train(self, train_data: pd.DataFrame, train_config=None):
series = train_data[self.target_name]
df = | pd.DataFrame({"ds": series.index, "y": series.values}) | pandas.DataFrame |
import copy
import os
import unittest
from enum import auto
import pandas as pd
import numpy as np
from collections import OrderedDict
import logging.config
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from tape import TAPETokenizer
import glob
import re
from tcrbert.commons import basename, FileUtils
from tcrbert.bioseq import is_valid_aaseq, rand_aaseq
from tcrbert.commons import StrEnum, BaseTest
# Logger
logger = logging.getLogger('tcrbert')
class TCREpitopeDFLoader(object):
class ColumnName(StrEnum):
epitope = auto()
epitope_gene = auto()
epitope_species = auto()
species = auto()
cdr3b = auto()
mhc = auto()
source = auto()
ref_id = auto()
label = auto()
@classmethod
def values(cls):
return [c.value for c in cls]
# Filters
class Filter(object):
def filter_df(self, df):
raise NotImplementedError()
class NotDuplicateFilter(Filter):
def filter_df(self, df):
            logger.debug('Drop duplicates with the same {epitope, CDR3b}')
df = df[~df.index.duplicated()]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
return df
class MoreThanCDR3bNumberFilter(Filter):
def __init__(self, cutoff=None):
self.cutoff = cutoff
def filter_df(self, df):
if self.cutoff and self.cutoff > 0:
logger.debug('Select all epitope with at least %s CDR3B sequences' % self.cutoff)
tmp = df[CN.epitope].value_counts()
tmp = tmp[tmp >= self.cutoff]
df = df[df[CN.epitope].map(lambda x: x in tmp.index)]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
return df
class QueryFilter(Filter):
def __init__(self, query=None):
self.query = query
def filter_df(self, df):
if self.query is not None:
logger.debug("Select all epitope by query: %s" % self.query)
df = df.query(self.query, engine='python')
logger.debug('Current df_enc.shape: %s' % str(df.shape))
return df
# Generate negative examples
class NegativeGenerator(object):
def generate_df(self, df_source):
raise NotImplementedError()
class DefaultNegativeGenerator(object):
def __init__(self,
fn_epitope='../data/bglib/bg_epitope.pkl',
fn_cdr3b='../data/bglib/bg_cdr3b.pkl'):
self.bg_epitopes = FileUtils.pkl_load(fn_epitope)
self.bg_cdr3bs = FileUtils.pkl_load(fn_cdr3b)
def generate_df(self, df_source):
df_pos = df_source[df_source[CN.label] == 1]
# pos_epitopes = df_pos[CN.epitope].unique()
# neg_epitopes = list(filter(lambda x: x not in pos_epitopes, self.bg_epitopes))
# logger.debug('len(pos_epitopes): %s, len(neg_epitopes): %s' % (len(pos_epitopes), len(neg_epitopes)))
pos_cdr3bs = df_pos[CN.cdr3b].unique()
neg_cdr3bs = list(filter(lambda x: x not in pos_cdr3bs, self.bg_cdr3bs))
logger.debug('len(pos_cdr3bs): %s, len(neg_cdr3bs): %s' % (len(pos_cdr3bs), len(neg_cdr3bs)))
df = pd.DataFrame(columns=CN.values())
for epitope, subdf in df_pos.groupby([CN.epitope]):
subdf_neg = subdf.copy()
subdf_neg[CN.source] = 'Control'
subdf_neg[CN.label] = 0
subdf_neg[CN.cdr3b] = np.random.choice(neg_cdr3bs, subdf.shape[0], replace=False)
subdf_neg.index = subdf_neg.apply(lambda row: TCREpitopeDFLoader._make_index(row), axis=1)
df = df.append(subdf_neg)
return df
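        # In effect (a reading of the loop above, not additional logic): every
        # positive (epitope, CDR3b) row gets a mirrored negative row with the
        # same epitope, label 0, source 'Control', and a background CDR3b that
        # never occurs among the positives, keeping classes balanced per epitope.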
def __init__(self, filters=None, negative_generator=None):
self.filters = filters
self.negative_generator = negative_generator
def load(self):
df = self._load()
# logger.debug('Select valid epitope and CDR3b seq')
# df_enc = df_enc.dropna(subset=[CN.epitope, CN.cdr3b])
# df_enc = df_enc[
# (df_enc[CN.epitope].map(is_valid_aaseq)) &
# (df_enc[CN.cdr3b].map(is_valid_aaseq))
# ]
# logger.debug('Current df_enc.shape: %s' % str(df_enc.shape))
if self.filters:
logger.debug('Filter data')
for filter in self.filters:
df = filter.filter_df(df)
if self.negative_generator:
logger.debug('Generate negative data')
df_neg = self.negative_generator.generate_df(df_source=df)
df = pd.concat([df, df_neg])
return df
def _load(self):
raise NotImplementedError()
@classmethod
def _make_index(cls, row, sep='_'):
return '%s%s%s' % (row[CN.epitope], sep, row[CN.cdr3b])
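# Example of the index format produced above (the pairing is hypothetical):
# epitope 'GILGFVFTL' with CDR3b 'CASSIRSSYEQYF' is keyed as
# 'GILGFVFTL_CASSIRSSYEQYF', so repeated (epitope, CDR3b) pairs collapse onto
# the same index entry.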
CN = TCREpitopeDFLoader.ColumnName
class FileTCREpitopeDFLoader(TCREpitopeDFLoader):
def __init__(self, fn_source=None, filters=None, negative_generator=None):
super().__init__(filters, negative_generator)
self.fn_source = fn_source
def _load(self):
return self._load_from_file(self.fn_source)
def _load_from_file(self, fn_source):
raise NotImplementedError()
class DashTCREpitopeDFLoader(FileTCREpitopeDFLoader):
GENE_INFO_MAP = OrderedDict({
'BMLF': ('EBV', 'GLCTLVAML', 'HLA-A*02:01'),
'pp65': ('CMV', 'NLVPMVATV', 'HLA-A*02:01'),
'M1': ('IAV', 'GILGFVFTL', 'HLA-A*02:01'),
'F2': ('IAV', 'LSLRNPILV', 'H2-Db'),
'NP': ('IAV', 'ASNENMETM', 'H2-Db'),
'PA': ('IAV', 'SSLENFRAYV', 'H2-Db'),
'PB1': ('IAV', 'SSYRRPVGI', 'H2-Kb'),
'm139': ('mCMV', 'TVYGFCLL', 'H2-Kb'),
'M38': ('mCMV', 'SSPPMFRV', 'H2-Kb'),
'M45': ('mCMV', 'HGIRNASFI', 'H2-Db'),
})
def _load_from_file(self, fn_source):
logger.debug('Loading from %s' % fn_source)
df = pd.read_table(fn_source, sep='\t')
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df[CN.epitope_gene] = df['epitope']
df[CN.epitope_species] = df[CN.epitope_gene].map(lambda x: self.GENE_INFO_MAP[x][0])
df[CN.epitope] = df[CN.epitope_gene].map(lambda x: self.GENE_INFO_MAP[x][1])
df[CN.mhc] = df[CN.epitope_gene].map(lambda x: self.GENE_INFO_MAP[x][2])
df[CN.species] = df['subject'].map(lambda x: 'human' if 'human' in x else 'mouse')
df[CN.cdr3b] = df['cdr3b'].str.strip().str.upper()
df[CN.source] = 'Dash'
df[CN.ref_id] = 'PMID:28636592'
df[CN.label] = 1
logger.debug('Select valid beta CDR3 and epitope sequences')
df = df.dropna(subset=[CN.cdr3b, CN.epitope])
df = df[
(df[CN.cdr3b].map(is_valid_aaseq)) &
(df[CN.epitope].map(is_valid_aaseq))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df.index = df.apply(lambda row: self._make_index(row), axis=1)
df = df.loc[:, CN.values()]
return df
class VDJDbTCREpitopeDFLoader(FileTCREpitopeDFLoader):
def _load_from_file(self, fn_source):
logger.debug('Loading from %s' % fn_source)
df = pd.read_table(fn_source, sep='\t', header=0)
logger.debug('Current df_enc.shape: %s' % str(df.shape))
# Select beta CDR3 sequence
logger.debug('Select beta CDR3 sequences and MHC-I restricted epitopes')
df = df[(df['gene'] == 'TRB') & (df['mhc.class'] == 'MHCI')]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
# Select valid CDR3 and peptide sequences
logger.debug('Select valid CDR3 and epitope sequences')
df = df.dropna(subset=['cdr3', 'antigen.epitope'])
df = df[
(df['antigen.epitope'].map(is_valid_aaseq)) &
(df['cdr3'].map(is_valid_aaseq))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
logger.debug('Select confidence score > 0')
df = df[df['vdjdb.score'].map(lambda score: score > 0)]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df[CN.epitope] = df['antigen.epitope'].str.strip().str.upper()
df[CN.epitope_species] = df['antigen.species']
df[CN.epitope_gene] = df['antigen.gene']
df[CN.species] = df['species']
df[CN.cdr3b] = df['cdr3'].str.strip().str.upper()
# df_enc[CN.mhc] = df_enc['mhc.a'].map(lambda x: MHCAlleleName.sub_name(MHCAlleleName.std_name(x)))
df[CN.mhc] = df['mhc.a']
df[CN.source] = 'VDJdb'
df[CN.ref_id] = df['reference.id']
df[CN.label] = 1
df.index = df.apply(lambda row: self._make_index(row), axis=1)
df = df.loc[:, CN.values()]
return df
class McPASTCREpitopeDFLoader(FileTCREpitopeDFLoader):
EPITOPE_SEP = '/'
def _load_from_file(self, fn_source):
logger.debug('Loading from %s' % fn_source)
df = pd.read_csv(fn_source)
logger.debug('Current df_enc.shape: %s' % str(df.shape))
logger.debug('Select valid beta CDR3 and epitope sequences')
df = df.dropna(subset=['CDR3.beta.aa', 'Epitope.peptide'])
df = df[
(df['CDR3.beta.aa'].map(is_valid_aaseq)) &
(df['Epitope.peptide'].map(is_valid_aaseq))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
# df_enc[CN.epitope] = df_enc['Epitope.peptide'].map(lambda x: x.split('/')[0].upper())
df[CN.epitope] = df['Epitope.peptide'].str.strip().str.upper()
# Handle multiple epitope
logger.debug('Extend by multi-epitopes')
tmpdf = df[df[CN.epitope].str.contains(self.EPITOPE_SEP)].copy()
for multi_epitope, subdf in tmpdf.groupby([CN.epitope]):
logger.debug('Multi epitope: %s' % multi_epitope)
tokens = multi_epitope.split(self.EPITOPE_SEP)
logger.debug('Convert epitope: %s to %s' % (multi_epitope, tokens[0]))
            df.loc[df[CN.epitope] == multi_epitope, CN.epitope] = tokens[0]
for epitope in tokens[1:]:
logger.debug('Extend by epitope: %s' % epitope)
subdf[CN.epitope] = epitope
df = df.append(subdf)
logger.debug('Current df_enc.shape: %s' % (str(df.shape)))
df[CN.epitope_gene] = None
df[CN.epitope_species] = df['Pathology']
df[CN.species] = df['Species']
df[CN.cdr3b] = df['CDR3.beta.aa'].str.strip().str.upper()
df[CN.mhc] = df['MHC'].str.strip()
df[CN.source] = 'McPAS'
df[CN.ref_id] = df['PubMed.ID'].map(lambda x: '%s:%s' % ('PMID', x))
df[CN.label] = 1
df.index = df.apply(lambda row: self._make_index(row), axis=1)
logger.debug('Select MHC-I restricted entries')
df = df[
(df[CN.mhc].notnull()) &
(np.logical_not(df[CN.mhc].str.contains('DR|DP|DQ')))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df = df.loc[:, CN.values()]
return df
class ShomuradovaTCREpitopeDFLoader(FileTCREpitopeDFLoader):
def _load_from_file(self, fn_source):
logger.debug('Loading from %s' % fn_source)
df = pd.read_csv(fn_source, sep='\t')
logger.debug('Current df_enc.shape: %s' % str(df.shape))
logger.debug('Select TRB Gene')
df = df[df['Gene'] == 'TRB']
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df[CN.epitope] = df['Epitope'].str.strip().str.upper()
df[CN.epitope_gene] = df['Epitope gene']
df[CN.epitope_species] = df['Epitope species']
df[CN.mhc] = df['MHC A']
df[CN.cdr3b] = df['CDR3'].str.strip().str.upper()
df[CN.species] = df['Species']
df[CN.source] = 'Shomuradova'
df[CN.ref_id] = 'PMID:33326767'
df[CN.label] = 1
logger.debug('Select valid beta CDR3 and epitope sequences')
df = df.dropna(subset=[CN.cdr3b, CN.epitope])
df = df[
(df[CN.cdr3b].map(is_valid_aaseq)) &
(df[CN.epitope].map(is_valid_aaseq))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df.index = df.apply(lambda row: self._make_index(row), axis=1)
df = df.loc[:, CN.values()]
return df
class ImmuneCODETCREpitopeDFLoader(FileTCREpitopeDFLoader):
def _load_from_file(self, fn_source):
logger.debug('Loading from %s' % fn_source)
df = pd.read_csv(fn_source)
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df[CN.epitope] = 'YLQPRTFLL'
df[CN.epitope_gene] = 'Spike'
df[CN.epitope_species] = 'SARS-CoV-2'
df[CN.mhc] = None
df[CN.cdr3b] = df['cdr3b'].str.strip().str.upper()
df[CN.species] = 'human'
df[CN.source] = 'ImmuneCODE'
df[CN.ref_id] = 'PMC7418738'
df[CN.label] = df['subject'].map(lambda x: 0 if x == 'control' else 1)
logger.debug('Select valid beta CDR3 and epitope sequences')
df = df.dropna(subset=[CN.cdr3b, CN.epitope])
df = df[
(df[CN.cdr3b].map(is_valid_aaseq)) &
(df[CN.epitope].map(is_valid_aaseq))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df.index = df.apply(lambda row: self._make_index(row), axis=1)
df = df.loc[:, CN.values()]
logger.debug('Loaded ImmuneCODE data. Current df_enc.shape: %s' % str(df.shape))
return df
class ImmuneCODE2TCREpitopeDFLoader(FileTCREpitopeDFLoader):
def _load_from_file(self, fn_source):
logger.debug('Loading from %s' % fn_source)
df = pd.read_csv(fn_source)
logger.debug('Current df.shape: %s' % str(df.shape))
rows = []
for i, row in df.iterrows():
cdr3b = row['TCR BioIdentity'].split('+')[0]
epitopes = row['Amino Acids']
orfs = row['ORF Coverage']
for epitope in epitopes.split(','):
                rows.append([epitope, orfs, 'SARS-CoV-2', 'human', cdr3b, None, 'ImmuneCODE_002.1', None, 1])  # None placeholder for ref_id, set below
df = pd.DataFrame(rows, columns=CN.values())
logger.debug('Select valid beta CDR3 and epitope sequences')
df = df.dropna(subset=[CN.cdr3b, CN.epitope])
df = df[
(df[CN.cdr3b].map(is_valid_aaseq)) &
(df[CN.epitope].map(is_valid_aaseq))
]
logger.debug('Current df.shape: %s' % str(df.shape))
df[CN.ref_id] = 'PMC7418738'
df.index = df.apply(lambda row: self._make_index(row), axis=1)
return df
class ZhangTCREpitopeDFLoader(FileTCREpitopeDFLoader):
def _load_from_file(self, source_dir):
logger.debug('Loading from source directory %s' % source_dir)
df_seq = pd.read_csv('%s/pep_seq.csv' % source_dir, index_col=0)
def get_pep_seq(pep_id):
            the = re.sub(r'[\s_-]', '', pep_id)
            if the in df_seq.index.values:
                return df_seq[df_seq.index == the].peptide.iat[0]
            else:
                logger.warning('Peptide sequence for %s does not exist' % the)
return None
dfs = []
for fn in glob.glob('%s/**/*.tsv' % source_dir, recursive=True):
logger.debug('Loading data from %s' % fn)
df = pd.read_csv(fn, sep='\t')
logger.debug('Current df_enc.shape: %s' % str(df.shape))
bname = basename(fn, ext=False)
label = 1 if 'Pos' in bname else 0
if 'Peptide' in df.columns:
df[CN.epitope] = df['Peptide'].map(lambda x: get_pep_seq(x))
else:
pep_id = bname[bname.index('_') + 1:]
df[CN.epitope] = get_pep_seq(pep_id)
df[CN.epitope] = df[CN.epitope].str.strip().str.upper()
df[CN.epitope_gene] = None
df[CN.epitope_species] = 'human'
df[CN.mhc] = None
df[CN.cdr3b] = df['CDR3b'].str.strip().str.upper()
df[CN.species] = 'human'
df[CN.source] = 'Zhang'
df[CN.ref_id] = 'PMID: 32318072'
df[CN.label] = label
df.index = df.apply(lambda row: self._make_index(row), axis=1)
df = df.loc[:, CN.values()]
dfs.append(df)
df = pd.concat(dfs)
logger.debug('Select valid beta CDR3 and epitope sequences')
df = df.dropna(subset=[CN.cdr3b, CN.epitope])
df = df[
(df[CN.cdr3b].map(is_valid_aaseq)) &
(df[CN.epitope].map(is_valid_aaseq))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
logger.debug('Loaded Zhang data. Current df_enc.shape: %s' % str(df.shape))
return df
class IEDBTCREpitopeDFLoader(FileTCREpitopeDFLoader):
def _load_from_file(self, fn_source):
logger.debug('Loading from %s' % fn_source)
df = pd.read_csv(fn_source)
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df[CN.epitope] = df['Description'].str.strip().str.upper()
df[CN.epitope_gene] = df['Antigen']
df[CN.epitope_species] = df['Organism']
df[CN.mhc] = df['MHC Allele Names']
df[CN.cdr3b] = df['Chain 2 CDR3 Curated'].str.strip().str.upper()
df[CN.species] = 'human'
df[CN.source] = 'IEDB'
df[CN.ref_id] = df['Reference ID'].map(lambda x: 'IEDB:%s' % x)
df[CN.label] = 1
logger.debug('Select valid beta CDR3 and epitope sequences')
df = df.dropna(subset=[CN.cdr3b, CN.epitope])
df = df[
(df[CN.cdr3b].map(is_valid_aaseq)) &
(df[CN.epitope].map(is_valid_aaseq))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
df.index = df.apply(lambda row: self._make_index(row), axis=1)
df = df.loc[:, CN.values()]
logger.debug('Loaded IEDB data. Current df_enc.shape: %s' % str(df.shape))
return df
class NetTCREpitopeDFLoader(FileTCREpitopeDFLoader):
def _load_from_file(self, fn_source):
logger.debug('Loading from %s' % fn_source)
df = | pd.read_csv(fn_source, sep=';') | pandas.read_csv |
import os
import pickle
import numpy as np
from scipy.stats import beta
import pysam
from scipy.stats import binom_test
import pandas as pd
from statsmodels.stats.multitest import multipletests
def phred_to_prob(asciiChar):
    """Take an ASCII quality character as input and return its error probability."""
return 10**(-(ord(asciiChar)-33)/10)
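# Worked example (a sketch): ord('I') == 73, so phred_to_prob('I') returns
# 10 ** (-(73 - 33) / 10) == 1e-4, i.e. a Phred quality of 40 corresponds to
# an error probability of 0.0001.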
def get_error_probs(final_filt_bam, e_miRbase_dict):
bam_input = pysam.AlignmentFile(final_filt_bam, 'rb')
error_probs_dict = {}
for alignment in bam_input.fetch(until_eof=True):
ref_name = alignment.reference_name
if ref_name.find('_e') != -1:
basename = ref_name[:ref_name.find('_')]
error_probs = list(map(phred_to_prob, list(alignment.qual)))
src_editing_sites = e_miRbase_dict[basename][ref_name]['editingSites']
ref_seq = alignment.get_reference_sequence()
src_ref_seq = e_miRbase_dict[basename][ref_name]['sequence']
match_index = src_ref_seq.find(ref_seq.upper())
edited_sites = list(filter(lambda x: x >= 0 and x < len(ref_seq), [i - match_index for i in src_editing_sites]))
if basename not in error_probs_dict.keys():
error_probs_dict[basename] = {}
for query_editing_site in edited_sites:
ref_editing_site = query_editing_site + match_index
error_probs_dict[basename][ref_editing_site] = [error_probs[query_editing_site]]
elif basename in error_probs_dict.keys():
for query_editing_site in edited_sites:
ref_editing_site = query_editing_site + match_index
if ref_editing_site not in error_probs_dict[basename].keys():
error_probs_dict[basename][ref_editing_site] = [error_probs[query_editing_site]]
elif ref_editing_site in error_probs_dict[basename].keys():
error_probs_dict[basename][ref_editing_site] += [error_probs[query_editing_site]]
return error_probs_dict
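# Shape of the returned mapping (values are hypothetical): for each miRNA
# basename, every reference editing site maps to the list of per-read error
# probabilities observed at that position, e.g.
# {'hsa-mir-376a': {17: [1e-4, 3.2e-4], 22: [1e-3]}}.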
def get_haplotypes_counts():
haplotypes_counts = {}
with open('counts.txt', 'r') as file:
for line in file:
line = line.rstrip('\n').split('\t')
haplotypes_counts[line[0]] = float(line[1])
return haplotypes_counts
def get_editing_data(final_filt_bam, path_to_emiRbase):
e_miRbase_dict = pickle.load(open(path_to_emiRbase, 'rb'))
haplotypes_counts = get_haplotypes_counts()
error_probs_dict = get_error_probs(final_filt_bam=final_filt_bam, e_miRbase_dict=e_miRbase_dict)
unedited_sites_counts = {}
edited_sites_counts = {}
for haplotype, count in haplotypes_counts.items():
basename = haplotype[:haplotype.find('_')]
sequence = e_miRbase_dict[basename][haplotype]['sequence']
unedited_sites = [index for index, base in enumerate(sequence) if base == 'A']
if basename not in unedited_sites_counts.keys():
unedited_sites_counts[basename] = {}
for i in unedited_sites:
unedited_sites_counts[basename][i] = count
elif basename in unedited_sites_counts.keys():
for i in unedited_sites:
if i not in unedited_sites_counts[basename].keys():
unedited_sites_counts[basename][i] = count
elif i in unedited_sites_counts[basename].keys():
unedited_sites_counts[basename][i] += count
if haplotype.find('_e') != -1:
edited_sites = e_miRbase_dict[basename][haplotype]['editingSites']
if basename not in edited_sites_counts.keys():
edited_sites_counts[basename] = {}
for i in edited_sites:
edited_sites_counts[basename][i] = count
elif basename in edited_sites_counts.keys():
for i in edited_sites:
if i not in edited_sites_counts[basename].keys():
edited_sites_counts[basename][i] = count
elif i in unedited_sites_counts[basename].keys():
edited_sites_counts[basename][i] += count
return edited_sites_counts, unedited_sites_counts, haplotypes_counts, error_probs_dict
def monte_catlo_p_estimation(final_filt_bam, path_to_emiRbase, resamples):
edited_sites_counts, unedited_sites_counts, haplotypes_counts, error_probs_dict = get_editing_data(final_filt_bam=final_filt_bam,
path_to_emiRbase=path_to_emiRbase)
# editing_data_file = open('editing_info.txt', 'w')
# editing_data_file.write(
# 'miRNA' + '\t' + 'position' + '\t' + 'edited' + '\t' + 'unedited' + '\t' +
# 'editing_level' + '\t' + 'LCI' + '\t' 'UCI' + '\t' + 'p_value' + '\n')
columns = ['miRNA', 'position', 'edited', 'unedited', 'editing_level', 'LCI', 'UCI', 'p_value']
editing_data = | pd.DataFrame(columns=columns) | pandas.DataFrame |
from functools import partial, singledispatch
from types import MappingProxyType
from typing import Collection, Union
import joblib
import nltk
import pandas as pd
from ndg_tools._validation import _validate_strings
from ndg_tools.language.processors.tokens import fetch_stopwords, remove_stopwords
from ndg_tools.language.settings import DEFAULT_TOKENIZER
from ndg_tools.language.utils import chain_processors
from ndg_tools.typing import CallableOnStr, Documents, Tokenizer
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from sklearn.utils import deprecated
from tqdm.notebook import tqdm
NGRAM_FINDERS = MappingProxyType(
{
2: nltk.BigramCollocationFinder,
3: nltk.TrigramCollocationFinder,
4: nltk.QuadgramCollocationFinder,
}
)
"""Mapping for selecting ngram-finder."""
NGRAM_METRICS = MappingProxyType(
{
2: nltk.BigramAssocMeasures,
3: nltk.TrigramAssocMeasures,
4: nltk.QuadgramAssocMeasures,
}
)
"""Mapping for selecting ngram scoring object."""
def categorical_ngrams(
data: DataFrame,
*,
text: str,
cat: Union[str, Series],
n: int = 2,
metric: str = "pmi",
tokenizer: Tokenizer = DEFAULT_TOKENIZER,
preprocessor: CallableOnStr = None,
stopwords: Union[str, Collection[str]] = None,
min_freq: int = 0,
select_best: float = None,
fuse_tuples: bool = False,
sep: str = " ",
n_jobs=None,
):
get_ngrams = partial(
scored_ngrams,
n=n,
metric=metric,
stopwords=stopwords,
preprocessor=preprocessor,
tokenizer=tokenizer,
min_freq=min_freq,
fuse_tuples=fuse_tuples,
sep=sep,
)
get_ngrams = joblib.delayed(get_ngrams)
workers = joblib.Parallel(n_jobs=n_jobs, prefer="processes")
# Get aligned labels and group frames, ignoring empty
labels, groups = zip(
*[(lab, grp) for lab, grp in data.groupby(cat) if not grp.empty]
)
# Search for ngrams with optional multiprocessing
cat_ngrams = workers(get_ngrams(grp.loc[:, text]) for grp in groups)
# Turn each scored ngram Series into a DataFrame
cat_ngrams = [
ng.reset_index().assign(**{cat: lab})
for lab, ng in zip(labels, cat_ngrams)
if not ng.empty
]
# Select top scores in each category
if select_best is not None:
for i, group in enumerate(cat_ngrams):
cut = group.score.quantile(1 - select_best)
cat_ngrams[i] = group.loc[group.score >= cut]
# Stack frames vertically and renumber
return | pd.concat(cat_ngrams) | pandas.concat |
import pathlib
import pandas as pd
from pathlib import Path
from my_module import compressor
from my_module import text_formatter as tf
import re
from sklearn.model_selection import train_test_split
from typing import List
def gen_model_resource(sentences: List[str], labels: List[str]):
x_train, x_test, y_train, y_test = train_test_split(
sentences, labels, test_size=0.1, stratify=labels, random_state=0)
pd.DataFrame({'y_train': y_train, 'x_train': x_train}
).to_csv('train_data.csv', index=False)
pd.DataFrame({'y_test': y_test, 'x_test': x_test}
).to_csv('test_data.csv', index=False)
def aggregate_by_rate(review_path: Path, freq: str = 'M') -> pd.DataFrame:
'''
    Aggregate the number of reviews per rating over arbitrary time periods (freq).
'''
# Read DataFrame.
df = pd.read_csv(review_path)
# Delete 'comments' colum.
df.drop(columns=['comments', 'votes'], inplace=True)
# Convert 'dates' colum into DateTime type
df['dates'] = | pd.to_datetime(df['dates']) | pandas.to_datetime |
import numpy as np
import pandas as pd
from ... import delayed
from .io import from_delayed, from_pandas
def read_sql_table(
table,
uri,
index_col,
divisions=None,
npartitions=None,
limits=None,
columns=None,
bytes_per_chunk=256 * 2 ** 20,
head_rows=5,
schema=None,
meta=None,
engine_kwargs=None,
**kwargs
):
"""
Create dataframe from an SQL table.
If neither divisions or npartitions is given, the memory footprint of the
first few rows will be determined, and partitions of size ~256MB will
be used.
Parameters
----------
table : string or sqlalchemy expression
Select columns from here.
uri : string
Full sqlalchemy URI for the database connection
index_col : string
Column which becomes the index, and defines the partitioning. Should
be a indexed column in the SQL server, and any orderable type. If the
type is number or time, then partition boundaries can be inferred from
npartitions or bytes_per_chunk; otherwide must supply explicit
``divisions=``.
``index_col`` could be a function to return a value, e.g.,
``sql.func.abs(sql.column('value')).label('abs(value)')``.
``index_col=sql.func.abs(sql.column("value")).label("abs(value)")``, or
``index_col=cast(sql.column("id"),types.BigInteger).label("id")`` to convert
the textfield ``id`` to ``BigInteger``.
        Note that ``sql``, ``cast``, and ``types`` come from the ``sqlalchemy`` module.
Labeling columns created by functions or arithmetic operations is
required.
divisions: sequence
Values of the index column to split the table by. If given, this will
override npartitions and bytes_per_chunk. The divisions are the value
boundaries of the index column used to define the partitions. For
example, ``divisions=list('acegikmoqsuwz')`` could be used to partition
        a string column lexicographically into 12 partitions, with the implicit
assumption that each partition contains similar numbers of records.
npartitions : int
Number of partitions, if divisions is not given. Will split the values
of the index column linearly between limits, if given, or the column
max/min. The index column must be numeric or time for this to work
limits: 2-tuple or None
Manually give upper and lower range of values for use with npartitions;
if None, first fetches max/min from the DB. Upper limit, if
given, is inclusive.
columns : list of strings or None
Which columns to select; if None, gets all; can include sqlalchemy
functions, e.g.,
``sql.func.abs(sql.column('value')).label('abs(value)')``.
Labeling columns created by functions or arithmetic operations is
recommended.
bytes_per_chunk : int
If both divisions and npartitions is None, this is the target size of
each partition, in bytes
head_rows : int
How many rows to load for inferring the data-types, unless passing meta
meta : empty DataFrame or None
If provided, do not attempt to infer dtypes, but use these, coercing
all chunks on load
schema : str or None
If using a table name, pass this to sqlalchemy to select which DB
schema to use within the URI connection
engine_kwargs : dict or None
Specific db engine parameters for sqlalchemy
kwargs : dict
Additional parameters to pass to `pd.read_sql()`
Returns
-------
dask.dataframe
Examples
--------
>>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',
... npartitions=10, index_col='id') # doctest: +SKIP
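    A sketch with explicit ``divisions`` (same hypothetical table and URI):
    the boundary values below split the integer ``id`` column into three
    partitions, at 1000 and 5000.
    >>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',
    ...                        divisions=[0, 1000, 5000, 10000],
    ...                        index_col='id')  # doctest: +SKIP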
"""
import sqlalchemy as sa
from sqlalchemy import sql
from sqlalchemy.sql import elements
if index_col is None:
raise ValueError("Must specify index column to partition on")
engine_kwargs = {} if engine_kwargs is None else engine_kwargs
engine = sa.create_engine(uri, **engine_kwargs)
m = sa.MetaData()
if isinstance(table, str):
table = sa.Table(table, m, autoload=True, autoload_with=engine, schema=schema)
index = table.columns[index_col] if isinstance(index_col, str) else index_col
if not isinstance(index_col, (str, elements.Label)):
raise ValueError(
"Use label when passing an SQLAlchemy instance as the index (%s)" % index
)
if divisions and npartitions:
raise TypeError("Must supply either divisions or npartitions, not both")
columns = (
[(table.columns[c] if isinstance(c, str) else c) for c in columns]
if columns
else list(table.columns)
)
if index_col not in columns:
columns.append(
table.columns[index_col] if isinstance(index_col, str) else index_col
)
if isinstance(index_col, str):
kwargs["index_col"] = index_col
else:
# function names get pandas auto-named
kwargs["index_col"] = index_col.name
if meta is None:
# derive metadata from first few rows
q = sql.select(columns).limit(head_rows).select_from(table)
head = | pd.read_sql(q, engine, **kwargs) | pandas.read_sql |
# ===============================
# AUTHOR: Dr <NAME>
# CONTACT DETAILS: <EMAIL>
# CREATE DATE: 7 May 2021
# PURPOSE: Model the subline frequency setting problem
# SPECIAL NOTES: -
# ===============================
# Change History: v1
# ==================================
# import solver
import gurobipy as gp
from gurobipy import GRB
import pandas as pd
import numpy as np
model = gp.Model()
#Set the values of sets.
R = (1,2,3,4,5,6,7,8,9,10,11) #list of lines
S = (1,2,3,4,5,6,7,8,9,10,11,12,13,14) #list of stops
#OD-pairs with passenger demand
O=[]
for j in range(1,7):
for i in range(1,8):
if j<i:
O.append((j,i))
for j in range(8,14):
for i in range(8,15):
if j<i:
O.append((j,i))
O=tuple(O)
Iset = np.arange(1,101)
Iset = tuple(Iset)
D={(i, j): 1 for i in R for j in O}
for r in [2,3,4,5,6]:
if r>1:
for j in O:
if (j[0]>8-r and j[0]<7+r) or (j[1]>8-r and j[1]<7+r):
D[r,j]=0
for r in [7,8,9,10,11]:
for j in O:
if (j[0]>=21-r or j[0]<=r-6) or (j[1]>=21-r or j[1]<=-6+r):
D[r,j]=0
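# Interpretation of D (a reading of the loops above, not additional logic):
# D[r, (i, j)] is a 0/1 indicator of whether sub-line r can serve OD pair
# (i, j); it starts at 1 for every pair and is zeroed whenever either stop of
# the pair falls outside the stops covered by that shortened sub-line.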
F_r = (0,1,2,3,4,5,6,8,10,12,15,20,30,60) #permitted frequencies
W1=3
W2=1.5
Tr={1:0.3, 2:0.237, 3:0.189, 4:0.142, 5:0.11, 6:0.063, 7:0.237, 8:0.189, 9:0.1577, 10:0.11, 11:0.063} #round-trip travel time of every trip in r\in R expressed in hours;
Theta=2 #maximum allowed waiting time to ensure a minimum level of service for any passenger;
K=2 #minimum number of minibusses that should be assigned to the original line;
N=36 #number of available minibuses;
M=100000000
F=0,1,2,3,4,5,6,8,10,12,15,20,30,60 #set of frequencies;
f={0:0,1:1,2:2,3:3,4:4,5:5,6:6,8:8,10:10,12:12,15:15,20:20,30:30,60:60}
F_min=1.0 #minimum required frequency of a sub-line to be allowed to be operational in the time period T;
Tperiod=6 #time period of the planning phase;
c=8 #minibus capacity;
retrieve_data = | pd.read_excel('Data_input/data_input_14stops.xlsx', sheet_name='sbt_fr', index_col=0) | pandas.read_excel |
import scipy as sp
from statsmodels.stats.anova import AnovaRM
import itertools
import pandas as pd
import numpy as np
def AnovaRM_with_post_hoc(data, dep_var, subject, within, only_significant = False):
# One within
anova = AnovaRM(data, dep_var, subject, within)
print(anova.fit())
# Post-hoc with ttest
pairwise_ttest_rel(data,
dep_var,
within = within,
only_significant = only_significant
)
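# Usage sketch (column names and data are hypothetical): the helpers above
# expect a long-format DataFrame with one row per subject/condition pair.
def _demo_rm_anova():
    data = pd.DataFrame({
        'subject': ['s1', 's1', 's2', 's2', 's3', 's3'],
        'condition': ['A', 'B', 'A', 'B', 'A', 'B'],
        'rt': [0.41, 0.52, 0.38, 0.47, 0.45, 0.55],
    })
    AnovaRM_with_post_hoc(data, dep_var='rt', subject='subject',
                          within=['condition'], only_significant=False)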
def pairwise_ttest_rel(data, dep_var, within, only_significant = False, only_first_within_comprisons = True):
# ttest related measures - One indep_var
if len(within) == 1:
conditions = data[within[0]].unique()
list_of_ttests = list(itertools.combinations(conditions, 2))
elif len(within) == 2:
list1 = data[within[0]].unique()
list2 = data[within[1]].unique()
list_product = list(itertools.product(list1,list2))
list_of_ttests = list(itertools.combinations(list_product, 2))
print(" Post Hoc inter {}\n==========================================================================".format(' and '.join(within)))
print("{:<48}{:>12} {:>12}".format('Test', 'p-value', 't-value'))
indep_var = within[0]
for combination_of_conditions in list_of_ttests:
if len(within) == 1:
query1 = indep_var + "==" + "'" + combination_of_conditions[0] + "'"
query2 = indep_var + "==" + "'" + combination_of_conditions[1] + "'"
at_least_one_same_cond = True
elif len(within) == 2:
at_least_one_same_cond = (combination_of_conditions[0][0] == combination_of_conditions[1][0]) or (combination_of_conditions[0][1] == combination_of_conditions[1][1])
other_indep_var = within[1]
query1 = indep_var + "==" "'" + combination_of_conditions[0][0] + "' & " + other_indep_var + "==" "'" + combination_of_conditions[0][1] + "'"
query2 = indep_var + "==" "'" + combination_of_conditions[1][0] + "' & " + other_indep_var + "==" "'" + combination_of_conditions[1][1] + "'"
if at_least_one_same_cond and only_first_within_comprisons:
ttest = sp.stats.ttest_rel(data.query(query1)[dep_var],
data.query(query2)[dep_var])
if len(within) == 1:
sep = ''
elif len(within) == 2:
sep = ' '
if ttest.pvalue <= 0.05:
print("\033[91m{:>22} VS {:<22}{:>12.3f}{:>12.3f}\033[0m".format(sep.join(combination_of_conditions[0]), sep.join(combination_of_conditions[1]),
ttest.pvalue,ttest.statistic))
elif not only_significant:
print("{:>22} VS {:<22}{:>12.3f}{:>12.3f}".format(sep.join(combination_of_conditions[0]), sep.join(combination_of_conditions[1]),
ttest.pvalue,ttest.statistic))
print("==========================================================================\n\n")
def remove_outliers(df, columns = ['all'], zscore = 3):
new_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/python
"""functions to create the figures for publication
"""
import seaborn as sns
import math
import pyrtools as pt
import neuropythy as ny
import os.path as op
import warnings
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import pandas as pd
import re
import itertools
from sklearn import linear_model
from . import summary_plots
from . import analyze_model
from . import plotting
from . import model
from . import utils
from . import first_level_analysis
from . import style
def create_precision_df(paths, summary_func=np.mean,
df_filter_string='drop_voxels_with_mean_negative_amplitudes,drop_voxels_near_border'):
"""Create dataframe summarizing subjects' precision
When combining parameter estimates into an 'overall' value, we want
to use the precision of each subject's data. To do that, we take the
first level summary dfs (using regex to extract the subject,
session, and task from the path) and call `summary_func` on the
`precision` column. This gives us a single number, which we'll use
when performing the precision-weighted mean
df_filter_string can be used to filter the voxels we examine, so
that we look only at those voxels that the model was fit to
Parameters
----------
paths : list
list of strings giving the paths to the first level summary
dfs.
summary_func : callable, optional
function we use to summarize the precision. Must take an array
as its first input, not require any other inputs, and return a
single value
df_filter_string : str or None, optional
a str specifying how to filter the voxels in the dataset. see
the docstrings for sfp.model.FirstLevelDataset and
sfp.model.construct_df_filter for more details. If None, we
won't filter. Should probably use the default, which is what all
models are trained using.
Returns
-------
df : pd.DataFrame
dataframe containing one row per (subject, session) pair, giving
the precision for that scanning session. used to weight
bootstraps
"""
regex_names = ['subject', 'session', 'task']
regexes = [r'(sub-[a-z0-9]+)', r'(ses-[a-z0-9]+)', r'(task-[a-z0-9]+)']
df = []
for p in paths:
tmp = pd.read_csv(p)
if df_filter_string is not None:
df_filter = model.construct_df_filter(df_filter_string)
tmp = df_filter(tmp).reset_index()
val = summary_func(tmp.precision.values)
if hasattr(val, '__len__') and len(val) > 1:
raise Exception(f"summary_func {summary_func} returned more than one value!")
data = {'precision': val}
for n, regex in zip(regex_names, regexes):
res = re.findall(regex, p)
if len(set(res)) != 1:
raise Exception(f"Unable to infer {n} from path {p}!")
data[n] = res[0]
df.append(pd.DataFrame(data, [0]))
return pd.concat(df).reset_index(drop=True)
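# Example of the path parsing above (the filename is hypothetical): a summary
# csv named 'sub-wlsubj001_ses-04_task-sfprescaled_summary.csv' yields
# subject='sub-wlsubj001', session='ses-04', task='task-sfprescaled' for that
# row of the returned precision dataframe.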
def existing_studies_df():
"""create df summarizing earlier studies
there have been a handful of studies looking into this, so we want
to summarize them for ease of reference. Each study is measuring
preferred spatial frequency at multiple eccentricities in V1 using
fMRI (though how exactly they determine the preferred SF and the
stimuli they use vary)
This dataframe contains the following columns:
- Paper: the reference for this line
- Eccentricity: the eccentricity (in degrees) that they measured
preferred spatial frequency at
- Preferred spatial frequency (cpd): the preferred spatial frequency
measured at this eccentricity (in cycles per degree)
- Preferred period (deg): the preferred period measured at this
eccentricity (in degrees per cycle); this is just the inverse of
the preferred spatial frequency
The eccentricity / preferred spatial frequency were often not
reported in a manner that allowed for easy extraction of the data,
so the values should all be taken as approximate, as they involve me
attempting to read values off of figures / colormaps.
Papers included (and their reference in the df):
- Sasaki (2001): <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., & <NAME>. (2001). Local and global
attention are mapped retinotopically in human occipital
cortex. Proceedings of the National Academy of Sciences, 98(4),
2077–2082.
    - Henriksson (2008): <NAME>., <NAME>., Hyvärinen,
Aapo, & <NAME>. (2008). Spatial frequency tuning in human
retinotopic visual areas. Journal of Vision, 8(10),
5. http://dx.doi.org/10.1167/8.10.5
- Kay (2011): <NAME>. (2011). Understanding Visual Representation
By Developing Receptive-Field Models. Visual Population Codes:
Towards a Common Multivariate Framework for Cell Recording and
Functional Imaging, (), 133–162.
- Hess (dominant eye, 2009): <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (2009). Selectivity as well as
sensitivity loss characterizes the cortical spatial frequency
deficit in amblyopia. Human Brain Mapping, 30(12),
4054–4069. http://dx.doi.org/10.1002/hbm.20829 (this paper reports
spatial frequency separately for dominant and non-dominant eyes in
amblyopes, only the dominant eye is reported here)
- D'Souza (2016): <NAME>., <NAME>., <NAME>., Strasburger,
H., & <NAME>. (2016). Dependence of chromatic responses in v1
on visual field eccentricity and spatial frequency: an fmri
study. JOSA A, 33(3), 53–64.
- Farivar (2017): <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (2017). Non-uniform phase sensitivity
in spatial frequency maps of the human visual cortex. The Journal
of Physiology, 595(4),
1351–1363. http://dx.doi.org/10.1113/jp273206
- Olsson (pilot, model fit): line comes from a model created by <NAME> in the Winawer lab, fit to pilot data collected by
<NAME> (so note that this is not data). Never ended up
in a paper, but did show in a presentation at VSS 2017: <NAME>,
<NAME>, <NAME>, <NAME> (2017) An anatomically-defined
template of BOLD response in
V1-V3. J. Vis. 17(10):585. DOI:10.1167/17.10.585
Returns
-------
df : pd.DataFrame
Dataframe containing the optimum spatial frequency at multiple
eccentricities from the different papers
"""
data_dict = {
'Paper': ['Sasaki (2001)',]*7,
'Preferred spatial frequency (cpd)': [1.25, .9, .75, .7, .6, .5, .4],
'Eccentricity': [0, 1, 2, 3, 4, 5, 12]
}
data_dict['Paper'].extend(['Henriksson (2008)', ]*5)
data_dict['Preferred spatial frequency (cpd)'].extend([1.2, .68, .46, .40, .18])
data_dict['Eccentricity'].extend([1.7, 4.7, 6.3, 9, 19])
# This is only a single point, so we don't plot it
# data_dict['Paper'].extend(['Kay (2008)'])
# data_dict['Preferred spatial frequency (cpd)'].extend([4.5])
# data_dict['Eccentricity'].extend([ 2.9])
data_dict['Paper'].extend(['Kay (2011)']*5)
data_dict['Preferred spatial frequency (cpd)'].extend([4, 3, 10, 10, 2])
data_dict['Eccentricity'].extend([2.5, 4, .5, 1.5, 7])
data_dict['Paper'].extend(["Hess (dominant eye, 2009)"]*3)
data_dict['Preferred spatial frequency (cpd)'].extend([2.25, 1.9, 1.75])
data_dict['Eccentricity'].extend([2.5, 5, 10])
data_dict['Paper'].extend(["D'Souza (2016)"]*3)
data_dict['Preferred spatial frequency (cpd)'].extend([2, .95, .4])
data_dict['Eccentricity'].extend([1.4, 4.6, 9.8])
data_dict['Paper'].extend(['Farivar (2017)']*2)
data_dict['Preferred spatial frequency (cpd)'].extend([3, 1.5,])
data_dict['Eccentricity'].extend([.5, 3])
# model fit and never published, so don't include.
# data_dict['Paper'].extend(['Olsson (pilot, model fit)']*10)
# data_dict['Preferred spatial frequency (cpd)'].extend([2.11, 1.76, 1.47, 2.75, 1.24, 1.06, .88, .77, .66, .60])
# data_dict['Eccentricity'].extend([2, 3, 4, 1, 5, 6, 7, 8, 9, 10])
# these values gotten using web plot digitizer and then rounded to 2
# decimal points
data_dict["Paper"].extend(['Aghajari (2020)']*9)
data_dict['Preferred spatial frequency (cpd)'].extend([2.24, 1.62, 1.26,
1.09, 0.88, 0.75,
0.78, 0.75, 0.70])
data_dict['Eccentricity'].extend([0.68, 1.78, 2.84, 3.90, 5.00, 6.06, 7.16,
8.22, 9.28])
# Predictions of the scaling hypothesis -- currently unused
# ecc = np.linspace(.01, 20, 50)
# fovea_cutoff = 0
# # two possibilities here
# V1_RF_size = np.concatenate([np.ones(len(ecc[ecc<fovea_cutoff])),
# np.linspace(1, 2.5, len(ecc[ecc>=fovea_cutoff]))])
# V1_RF_size = .2 * ecc
df = pd.DataFrame(data_dict)
df = df.sort_values(['Paper', 'Eccentricity'])
df["Preferred period (deg)"] = 1. / df['Preferred spatial frequency (cpd)']
return df
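# Usage sketch (illustrative, not part of the analysis pipeline): the dataframe
# returned above is in tidy/long format, so it can be passed straight to
# existing_studies_figure() below, or summarized directly:
#
#   df = existing_studies_df()
#   # median preferred period per paper, collapsing across eccentricity
#   df.groupby('Paper')['Preferred period (deg)'].median()
#   # or restrict to the more foveal measurements
#   df.query('Eccentricity <= 5')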
def _demean_df(df, y='cv_loss', extra_cols=[]):
"""demean a column of the dataframe
Calculate the mean of `y` across the values in the 'subject' and
'loss_func' columns, then demean `y` and return df with several new
columns:
- `demeaned_{y}`: each y with `{y}_mean` subtracted off
- `{y}_mean`: the average of y per subject per loss_func
- `{y}_mean_overall`: the average of `{y}_mean` per loss_func
- `remeaned_{y}`: the `demeaned_{y}` with `{y}_mean_overall` added
back to it
If you use this with the defaults, the overall goal of this is to
enable us to look at how the cv_loss varies across models, because
the biggest effect is the difference in cv_loss across
subjects. Demeaning the cv_loss on a subject-by-subject basis
enables us to put all the subjects together so we can look for
patterns across models. For example, we can then compute error bars
that only capture the variation across models, but not across
subjects. Both remeaned or demeaned will capture this, the question
is what values to have on the y-axis. If you use demeaned, you'll
have negative loss, which might be confusing. If you use remeaned,
the y-axis values will be the average across subjects, which might
be easier to interpret.
Parameters
----------
df : pd.DataFrame
dataframe to demean
y : str, optional
the column to demean
    extra_cols : list, optional
list of columns to de/remean using the mean from `y`. for
example, you might want to de/remean the noise_ceiling using the
mean from the cross-validation loss
Returns
-------
df : pd.DataFrame
dataframe with new, demeaned column
"""
gb_cols = ['subject', 'loss_func']
df = df.set_index(gb_cols)
y_mean = df.groupby(gb_cols)[y].mean()
df[f'{y}_mean'] = y_mean
# here we take the average over the averages. we do this so that we weight
# all of the groups the same. For example, if gb_cols=['subject'] and one
# subject had twice as many rows (because it had two sessions in df, for
# example), then this ensures that subject isn't twice as important when
# computing the mean (which would be the case if we used
# df[f'{y}_mean'].mean() instead). We do, however, want to do this
# separately for each loss function, since they'll probably have different
# means
df = df.reset_index()
df = df.set_index('loss_func')
df[f'{y}_mean_overall'] = y_mean.reset_index().groupby('loss_func')[y].mean()
df[f'demeaned_{y}'] = df[y] - df[f'{y}_mean']
df[f'remeaned_{y}'] = df[f'demeaned_{y}'] + df[f'{y}_mean_overall']
for col in extra_cols:
df[f'demeaned_{col}'] = df[col] - df[f'{y}_mean']
df[f'remeaned_{col}'] = df[f'demeaned_{col}'] + df[f'{y}_mean_overall']
return df.reset_index()
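# Worked example for _demean_df (hypothetical numbers, chosen only so the
# arithmetic is easy to follow):
#
#   toy = pd.DataFrame({'subject': ['s1', 's1', 's2', 's2'],
#                       'loss_func': ['L'] * 4,
#                       'fit_model_type': ['A', 'B', 'A', 'B'],
#                       'cv_loss': [1., 2., 3., 4.]})
#   out = _demean_df(toy)
#   # cv_loss_mean is 1.5 for s1 and 3.5 for s2, cv_loss_mean_overall is 2.5,
#   # so demeaned_cv_loss is [-.5, .5, -.5, .5] and remeaned_cv_loss is
#   # [2., 3., 2., 3.]: the subject-level offset is removed while the
#   # model A vs. model B difference is preserved.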
def prep_df(df, task, groupaverage=False):
"""prepare the dataframe by restricting to the appropriate subset
The dataframe created by earlier analysis steps contains all
scanning sessions and potentially multiple visual areas. for our
figures, we just want to grab the relevant scanning sessions and
visual areas (V1), so this function helps do that. If df has the
'frequency_type' column (i.e., it's summarizing the 1d tuning
curves), we also restrict to the "local_sf_magnitude" rows (rather
than "frequency_space")
Parameters
----------
df : pd.DataFrame
dataframe that will be used for plotting figures. contains some
summary of (either 1d or 2d) model information across sessions.
    task : {'task-sfprescaled', 'task-sfpconstant'}
        this determines which task we'll grab: task-sfprescaled or
        task-sfpconstant. task-sfp also exists, but we consider that
a pilot task and so do not allow it for the creation of figures
(the stimuli were not contrast-rescaled).
groupaverage : bool, optional
whether to grab only the groupaverage subjects (if True) or
every other subject (if False). Note that we'll grab/drop both
i-linear and i-nearest if they're both present
Returns
-------
df : pd.DataFrame
The restricted dataframe.
"""
if task not in ['task-sfprescaled', 'task-sfpconstant']:
raise Exception("Only task-sfprescaled and task-sfpconstant are allowed!")
df = df.query("task==@task")
if 'frequency_type' in df.columns:
df = df.query("frequency_type=='local_sf_magnitude'")
if 'varea' in df.columns:
df = df.query("varea==1")
if 'fit_model_type' in df.columns:
df.fit_model_type = df.fit_model_type.map(dict(zip(plotting.MODEL_ORDER,
plotting.MODEL_PLOT_ORDER)))
if 'subject' in df.columns:
df.subject = df.subject.map(dict(zip(plotting.SUBJECT_ORDER,
plotting.SUBJECT_PLOT_ORDER)))
return df
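# Typical call pattern (illustrative; the input df comes from earlier snakemake
# rules, and the task name shown is just an example):
#
#   df = prep_df(df, 'task-sfprescaled')
#   cv_df = _demean_df(df)  # if the df contains cross-validated loss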
def prep_model_df(df):
"""prepare models df for plotting
For plotting purposes, we want to rename the model parameters from
their original values (e.g., sf_ecc_slope, abs_mode_cardinals) to
those we use in the equation (e.g., a, p_1). We do that by simply
remapping the names from those given at plotting.ORIG_PARAM_ORDER to
those in plotting.PLOT_PARAM_ORDER. we additionally add a new
column, param_category, which we use to separate out the three types
of parameters: sigma, the effect of eccentricity, and the effect of
orientation / retinal angle.
Parameters
----------
df : pd.DataFrame
models dataframe, that is, the dataframe that summarizes the
parameter values for a variety of models
Returns
-------
df : pd.DataFrame
The remapped dataframe.
"""
rename_params = dict((k, v) for k, v in zip(plotting.ORIG_PARAM_ORDER,
plotting.PLOT_PARAM_ORDER))
df = df.set_index('model_parameter')
df.loc['sigma', 'param_category'] = 'sigma'
df.loc[['sf_ecc_slope', 'sf_ecc_intercept'], 'param_category'] = 'eccen'
df.loc[['abs_mode_cardinals', 'abs_mode_obliques', 'rel_mode_cardinals', 'rel_mode_obliques',
'abs_amplitude_cardinals', 'abs_amplitude_obliques', 'rel_amplitude_cardinals',
'rel_amplitude_obliques'], 'param_category'] = 'orientation'
df = df.reset_index()
df['model_parameter'] = df.model_parameter.map(rename_params)
return df
def append_precision_col(df, col='preferred_period',
gb_cols=['subject', 'session', 'varea', 'stimulus_superclass', 'eccen']):
"""append column giving precision of another column and collapse
this function gives the precision of the value found in a single
column (across the columns that are NOT grouped-by) and collapses
across those columns. The intended use case is to determine the
precision of a parameter estimate across bootstraps for each
(subject, session) (for the 2d model) or for each (subject, session,
stimulus_superclass, eccen) (for the 1d model).
precision is the inverse of the variance, so let :math:`c` be the
68% confidence interval of the column value, then precision is
:math:`\frac{1}{(c/2)^2}`
finally, we collapse across gb_cols, returning the median and
precision of col for each combination of values from those columns.
Parameters
----------
df : pd.DataFrame
the df that contains the values we want the precision for
col : str, optional
the name of the column that contains the values we want the
precision for
gb_cols : list, optional
list of strs containing the columns we want to groupby. we will
compute the precision separately for each combination of values
here.
Returns
-------
df : pd.DataFrame
the modified df, containing the median and precision of col
(also contains the medians of the other values in the original
df, but not their precision)
"""
gb = df.groupby(gb_cols)
df = df.set_index(gb_cols)
df[f'{col}_precision'] = gb[col].apply(first_level_analysis._precision_dist)
df = df.reset_index()
return df.groupby(gb_cols).median().reset_index()
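# Numeric illustration of the precision definition used above (hypothetical
# values): a bootstrap distribution whose 68% CI spans c = 0.4 has precision
# 1 / (0.4 / 2)**2 = 25, while a tighter CI of c = 0.2 has precision 100, so
# the second estimate carries four times the weight in the precision-weighted
# average computed below.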
def precision_weighted_bootstrap(df, seed, n_bootstraps=100, col='preferred_period',
gb_cols=['varea', 'stimulus_superclass', 'eccen'],
precision_col='preferred_period_precision'):
"""calculate the precision-weighted bootstrap of a column
to combine across subjects, we want to use a precision-weighted
average, rather than a regular average, because we are trying to
summarize the true value across the population and our uncertainty
in it. Therefore, we down-weight subjects whose estimate is
noisier. Similar to append_precision_col(), we groupby over some of
the columns to combine info across them (gb_cols here should be a
subset of those used for append_precision_col())
You should plot the values here with scatter_ci_dist() or something
similar to draw the 68% CI of the distribution here (not sample it
to draw the CI)
Parameters
----------
df : pd.DataFrame
the df that we want to bootstrap (must already have precision
column, i.e., this should be the df returned by
append_precision_col())
seed : int
seed for numpy's RNG
n_bootstraps : int, optional
the number of independent bootstraps to draw
col : str, optional
the name of the column that contains the values we want to draw
bootstraps for
gb_cols : list, optional
list of strs containing the columns we want to groupby. we will
compute the bootstraps for each combination of values here.
precision_col : str, optional
name of the column that contains the precision, used in the
precision-weighted mean
Returns
-------
df : pd.DataFrame
the df containing the bootstraps of precision-weighted
mean. this will only contain the following columns: col,
*gb_cols, and bootstrap_num
"""
np.random.seed(seed)
if type(gb_cols) != list:
raise Exception("gb_cols must be a list!")
bootstraps = []
for n, g in df.groupby(gb_cols):
# n needs to be a list of the same length as gb_cols for the
# dict(zip()) call to work, but if len(gb_cols) == 1, then it
# will be a single str (or int or float or whatever), so we
# convert it to a list real quick
if len(gb_cols) == 1:
n = [n]
tmp = dict(zip(gb_cols, n))
for j in range(n_bootstraps):
t = g.sample(len(g), replace=True)
tmp[col] = np.average(t[col], weights=t[precision_col])
tmp['bootstrap_num'] = j
bootstraps.append(pd.DataFrame(tmp, [0]))
bootstraps = pd.concat(bootstraps).reset_index(drop=True)
if 'subject' in df.columns and 'subject' not in gb_cols:
bootstraps['subject'] = 'all'
return bootstraps
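# The core of each bootstrap draw above is just a weighted average, e.g.
# (hypothetical numbers):
#
#   vals = np.array([1., 2.])      # preferred_period for two sampled rows
#   prec = np.array([100., 25.])   # their precisions, 1 / (c/2)**2
#   np.average(vals, weights=prec) # -> 1.2, pulled toward the less noisy row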
def _summarize_1d(df, reference_frame, y, row, col, height, facetgrid_legend,
**kwargs):
"""helper function for pref_period_1d and bandwidth_1d
since they're very similar functions.
"eccen" is always plotted on the x-axis, and hue is always
"stimulus_type" (unless overwritten with kwargs)
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
y : str
which column of the df to plot on the y-axis
reference_frame : {'relative', 'absolute'}
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str
which column of the df to facet the plot's rows on
col : str
which column of the df to facet the plot's column on
height : float
height of each plot facet
kwargs :
all passed to summary_plots.main() (most of these then get
passed to sns.FacetGrid, see the docstring of summary_plots.main
for more info)
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
pal = plotting.stimulus_type_palette(reference_frame)
hue_order = plotting.get_order('stimulus_type', reference_frame)
col_order, row_order = None, None
if col is not None:
col_order = plotting.get_order(col, col_unique=df[col].unique())
if row is not None:
row_order = plotting.get_order(row, col_unique=df[row].unique())
kwargs.setdefault('xlim', (0, 12))
g = summary_plots.main(df, row=row, col=col, y=y, eccen_range=(0, 11),
hue_order=hue_order, height=height,
plot_func=[plotting.plot_median_fit, plotting.plot_median_fit,
plotting.scatter_ci_dist],
# these three end up being kwargs passed to the
# functions above, in order
x_jitter=[None, None, .2],
x_vals=[(0, 10.5), None, None],
linestyle=['--', None, None],
palette=pal, col_order=col_order,
row_order=row_order,
facetgrid_legend=facetgrid_legend, **kwargs)
g.set_xlabels('Eccentricity (deg)')
if facetgrid_legend:
g._legend.set_title("Stimulus class")
return g
def pref_period_1d(df, context='paper', reference_frame='relative',
row='session', col='subject', col_wrap=None, **kwargs):
"""Plot the preferred period of the 1d model fits.
Note that we do not restrict the input dataframe in any way, so we
will plot all data contained within it. If this is not what you want
(e.g., you only want to plot some of the tasks), you'll need to do
the restrictions yourself before passing df to this function
The only difference between this and the bandwidth_1d function is
what we plot on the y-axis, and how we label it.
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
reference_frame : {'relative', 'absolute'}, optional
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str, optional
which column of the df to facet the plot's rows on
col : str, optional
which column of the df to facet the plot's column on
kwargs :
passed to sfp.figures._summarize_1d
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
# if we're wrapping columns, then we need this to take up the full width in
# order for it to be readable
if col_wrap is not None:
fig_width = 'full'
else:
fig_width = 'half'
params, fig_width = style.plotting_style(context, figsize=fig_width)
if col_wrap is not None:
fig_width /= col_wrap
# there is, as of seaborn 0.11.0, a bug that interacts with our xtick
# label size and height (see
# https://github.com/mwaskom/seaborn/issues/2293), which causes an
# issue if col_wrap == 3. this manual setting is about the same size
# and fixes it
if col_wrap == 3:
fig_width = 2.23
elif col is not None:
fig_width /= df[col].nunique()
plt.style.use(params)
if context == 'paper':
facetgrid_legend = False
kwargs.setdefault('xlim', (0, 11.55))
kwargs.setdefault('ylim', (0, 2.1))
else:
kwargs.setdefault('ylim', (0, 4))
facetgrid_legend = True
g = _summarize_1d(df, reference_frame, 'preferred_period', row, col,
fig_width, facetgrid_legend, col_wrap=col_wrap, **kwargs)
g.set_ylabels('Preferred period (deg)')
yticks = [i for i in range(4) if i <= kwargs['ylim'][1]]
g.set(yticks=yticks)
if context != 'paper':
g.fig.suptitle("Preferred period of 1d tuning curves in each eccentricity band")
g.fig.subplots_adjust(top=.85)
else:
if len(g.axes) == 1:
# remove title if there's only one plot (otherwise it tells us which
# subject is which)
g.axes.flatten()[0].set_title('')
for ax in g.axes.flatten():
ax.axhline(color='gray', linestyle='--')
ax.axvline(color='gray', linestyle='--')
ax.set(xticks=[0, 2, 4, 6, 8, 10])
g.fig.subplots_adjust(wspace=.05, hspace=.15)
return g
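# Example (illustrative; assumes `tuning_df` is the "full" 1d summary dataframe
# described in the docstring above):
#
#   tuning_df = prep_df(tuning_df, 'task-sfprescaled')
#   g = pref_period_1d(tuning_df, context='paper')
#   g.fig.savefig('pref_period_1d.svg', bbox_inches='tight')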
def bandwidth_1d(df, context='paper', reference_frame='relative',
row='session', col='subject', units='octaves', **kwargs):
"""plot the bandwidth of the 1d model fits
Note that we do not restrict the input dataframe in any way, so we
will plot all data contained within it. If this is not what you want
(e.g., you only want to plot some of the tasks), you'll need to do
the restrictions yourself before passing df to this function
The only difference between this and the pref_period_1d function is
what we plot on the y-axis, and how we label it.
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
    units : {'octaves', 'degrees'}, optional
Whether to plot this data in octaves (in which case we expect it to be
flat with eccentricity) or degrees (in which case we expect it to scale
with eccentricity)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
reference_frame : {'relative', 'absolute'}, optional
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str, optional
which column of the df to facet the plot's rows on
col : str, optional
which column of the df to facet the plot's column on
kwargs :
passed to sfp.figures._summarize_1d
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
if context == 'paper':
facetgrid_legend = False
kwargs.setdefault('xlim', (0, 11.55))
else:
facetgrid_legend = True
if units == 'degrees':
if 'tuning_curve_bandwidth_degrees' not in df.columns:
df['tuning_curve_bandwidth_degrees'] = df.apply(utils._octave_to_degrees, 1)
y = 'tuning_curve_bandwidth_degrees'
elif units == 'octaves':
y = 'tuning_curve_bandwidth'
kwargs.setdefault('ylim', (0, 8))
g = _summarize_1d(df, reference_frame, y, row, col,
fig_width, facetgrid_legend, **kwargs)
g.set_ylabels(f'Tuning curve FWHM ({units})')
if context != 'paper':
g.fig.suptitle("Full-Width Half-Max of 1d tuning curves in each eccentricity band")
g.fig.subplots_adjust(top=.85)
elif len(g.axes) == 1:
# remove title if there's only one plot (otherwise it tells us which
# subject is which)
g.axes.flatten()[0].set_title('')
return g
def existing_studies_figure(df, y="Preferred period (deg)", legend=True, context='paper'):
"""Plot the results from existing studies
See the docstring for figures.existing_studies_df() for more
details on the information displayed in this figure.
Parameters
----------
df : pd.DataFrame
The existing studies df, as returned by the function
figures.existing_studies_df().
y : {'Preferred period (deg)', 'Preferred spatial frequency (cpd)'}
Whether to plot the preferred period or preferred spatial
frequency on the y-axis. If preferred period, the y-axis is
linear; if preferred SF, the y-axis is log-scaled (base 2). The
ylims will also differ between these two
legend : bool, optional
Whether to add a legend or not
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
The FacetGrid containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
fig_height = fig_width / 1.2
pal = sns.color_palette('Set2', df.Paper.nunique())
pal = dict(zip(df.Paper.unique(), pal))
if 'Current study' in df.Paper.unique():
pal['Current study'] = (0, 0, 0)
g = sns.FacetGrid(df, hue='Paper', height=fig_height, aspect=1.2, palette=pal)
if y == "Preferred period (deg)":
g.map(plt.plot, 'Eccentricity', y, marker='o')
g.ax.set_ylim((0, 6))
elif y == "Preferred spatial frequency (cpd)":
g.map(plt.semilogy, 'Eccentricity', y, marker='o', basey=2)
g.ax.set_ylim((0, 11))
g.ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(plotting.myLogFormat))
g.ax.set_xlim((0, 20))
if context == 'poster':
g.ax.set(xticks=[0, 5, 10, 15, 20])
g.ax.set_title("Summary of human V1 fMRI results")
if legend:
g.add_legend()
# facetgrid doesn't let us set the title fontsize directly, so need to do
# this hacky work-around
g.fig.legends[0].get_title().set_size(mpl.rcParams['legend.title_fontsize'])
g.ax.set_xlabel('Eccentricity of receptive field center (deg)')
return g
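# Example (illustrative): plot the previously published estimates compiled in
# existing_studies_df(), either as preferred period (linear y-axis) or as
# preferred spatial frequency (log-scaled y-axis):
#
#   g = existing_studies_figure(existing_studies_df(), y='Preferred period (deg)')
#   g = existing_studies_figure(existing_studies_df(),
#                               y='Preferred spatial frequency (cpd)')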
def input_schematic(context='paper', prf_loc=(250, 250), prf_radius=100,
stim_freq=(.01, .03)):
"""Schematic to explain 2d model inputs.
This schematic explains the various inputs of our 2d model:
eccentricity, retinotopic angle, spatial frequency, and
orientation. It does this with a little diagram of a pRF with a
local stimulus, with arrows and labels.
The location and size of the pRF, as well as the frequency of the
stimulus, are all modifiable, and the labels and arrows will update
themselves. The arrows should behave appropriately, but it's hard to
guarantee that the labels will always look good (their positioning
is relative, so it will at least be close). You are restricted to
placing the pRF inside the first quadrant, which helps make the
possibilities more reasonable.
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
prf_loc : tuple, optional
2-tuple of floats, location of the prf. Both numbers must lie
between 0 and 500 (i.e., we require this to be in the first
quadrant). Max value on both x and y axes is 500.
prf_radius : float, optional
radius of the prf, in pixels. the local stimulus will have half
this radius
stim_freq : tuple, optional
2-tuple of floats, the (x_freq, y_freq) of the stimulus, in
cycles per pixel
Returns
-------
fig : plt.Figure
Figure containing the schematic
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
figsize = (fig_width, fig_width)
fig, ax = plt.subplots(1, 1, figsize=figsize)
def get_xy(distance, angle, origin=(500, 500)):
return [o + distance * func(angle) for o, func in
zip(origin, [np.cos, np.sin])]
pal = sns.color_palette('deep', 2)
if (np.array(prf_loc) > 500).any() or (np.array(prf_loc) < 0).any():
raise Exception("the coordinates of prf_loc must be between 0 and 500, but got "
f"value {prf_loc}!")
# prf_loc is in coordinates relative to the center, so we convert that here
abs_prf_loc = [500 + i for i in prf_loc]
mask = utils.create_circle_mask(*abs_prf_loc, prf_radius/2, 1001)
mask[mask==0] = np.nan
stim = mask * utils.create_sin_cpp(1001, *stim_freq)
plotting.im_plot(stim, ax=ax, origin='lower')
ax.axhline(500, c='.5')
ax.axvline(500, c='.5')
ax.set(xlim=(450, 1001), ylim=(450, 1001))
for s in ax.spines.keys():
ax.spines[s].set_visible(False)
prf = mpl.patches.Circle(abs_prf_loc, prf_radius, fc='none', ec='k', linewidth=2,
linestyle='--', zorder=10)
ax.add_artist(prf)
prf_ecc = np.sqrt(np.square(prf_loc).sum())
prf_angle = np.arctan2(*prf_loc[::-1])
e_loc = get_xy(prf_ecc/2, prf_angle + np.pi/13)
plotting.draw_arrow(ax, (500, 500), abs_prf_loc, arrowprops={'connectionstyle': 'arc3',
'arrowstyle': '<-',
'color': pal[1]})
ax.text(*e_loc, r'$r_v$')
ax.text(600, 500 + 100*np.sin(prf_angle/2), r'$\theta_v$')
angle = mpl.patches.Arc((500, 500), 200, 200, 0, 0, np.rad2deg(prf_angle),
fc='none', ec=pal[1], linestyle='-')
ax.add_artist(angle)
# so that this is the normal vector, the 7000 is just an arbitrary
# scale factor to make the vector a reasonable length
normal_len = 7000 * np.sqrt(np.square(stim_freq).sum())
normal_angle = np.arctan2(*stim_freq[::-1])
omega_loc = get_xy(normal_len, normal_angle, abs_prf_loc)
plotting.draw_arrow(ax, abs_prf_loc, omega_loc, r'$\omega_l$', {'connectionstyle': 'arc3',
'arrowstyle': '<-',
'color': pal[0]})
angle = mpl.patches.Arc(abs_prf_loc, 1.2*normal_len, 1.2*normal_len, 0, 0,
# small adjustment appears to be necessary for some
# reason -- but really only for some spatial
# frequencies.
np.rad2deg(normal_angle)-3,
fc='none', ec=pal[0], linestyle='-')
ax.add_artist(angle)
plotting.draw_arrow(ax, (abs_prf_loc[0] + normal_len, abs_prf_loc[1]), abs_prf_loc,
arrowprops={'connectionstyle': 'angle3', 'arrowstyle': '-', 'color': '.5',
'linestyle': ':'})
theta_loc = get_xy(1.3*normal_len/2, normal_angle/2, abs_prf_loc)
ax.text(*theta_loc, r'$\theta_l$')
return fig
def model_schematic(context='paper'):
"""Create model schematic.
In order to better explain the model, its predictions, and the
effects of its parameters, we create a model schematic that shows
the effects of the different p parameters (those that control the
effect of stimulus orientation and retinotopic angle on preferred
period).
This creates only the polar plots (showing the preferred period contours),
and doesn't have a legend; it's intended that you call
compose_figures.add_legend to add the graphical one (and a space has been
left for it)
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
fig : plt.Figure
Figure containing the schematic
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
figsize = (fig_width, fig_width/3)
if context == 'paper':
orientation = np.linspace(0, np.pi, 4, endpoint=False)
elif context == 'poster':
orientation = np.linspace(0, np.pi, 2, endpoint=False)
abs_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1)
rel_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
full_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
# we can't use the plotting.feature_df_plot / feature_df_polar_plot
# functions because they use FacetGrids, each of which creates a
# separate figure and we want all of this to be on one figure.
fig, axes = plt.subplots(1, 3, figsize=figsize,
subplot_kw={'projection': 'polar'})
labels = [r'$p_1>p_2>0$', r'$p_3>p_4>0$',
# can't have a newline in a raw string, so have to combine them
# in the last label here
r'$p_1=p_3>$'+'\n'+r'$p_2=p_4>0$']
for i, (m, ax) in enumerate(zip([abs_model, rel_model, full_model], axes)):
plotting.model_schematic(m, [ax], [(-.1, 3)], False,
orientation=orientation)
if i != 0:
ax.set(ylabel='')
if i != 1:
ax.set(xlabel='')
else:
# want to move this closer
ax.set_xlabel(ax.get_xlabel(), labelpad=-10)
ax.set_title(labels[i])
ax.set(xticklabels=[], yticklabels=[])
fig.subplots_adjust(wspace=.075)
return fig
def model_schematic_large(context='paper'):
"""Create larger version of model schematic.
In order to better explain the model, its predictions, and the
effects of its parameters, we create a model schematic that shows
the effects of the different p parameters (those that control the
effect of stimulus orientation and retinotopic angle on preferred
period).
Note that this includes both linear and polar plots, and will probably be
way too large
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
fig : plt.Figure
Figure containing the schematic
"""
if context == 'paper':
orientation = np.linspace(0, np.pi, 4, endpoint=False)
size_scale = 1
elif context == 'poster':
size_scale = 1.5
orientation = np.linspace(0, np.pi, 2, endpoint=False)
abs_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1)
rel_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
full_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
# we can't use the plotting.feature_df_plot / feature_df_polar_plot
# functions because they use FacetGrids, each of which creates a
# separate figure and we want all of this to be on one figure.
fig = plt.figure(figsize=(size_scale*15, size_scale*15))
gs = mpl.gridspec.GridSpec(figure=fig, ncols=3, nrows=3)
projs = ['rectilinear', 'polar']
labels = [r'$p_1>p_2>0$', r'$p_3>p_4>0$', r'$p_1=p_3>p_2=p_4>0$']
axes = []
for i, m in enumerate([abs_model, rel_model, full_model]):
model_axes = [fig.add_subplot(gs[i, j], projection=projs[j]) for j in range(2)]
if i == 0:
title = True
else:
title = False
model_axes = plotting.model_schematic(m, model_axes[:2], [(-.1, 4.2), (-.1, 3)], title,
orientation=orientation)
if i != 2:
[ax.set(xlabel='') for ax in model_axes]
model_axes[0].text(size_scale*-.25, .5, labels[i], rotation=90,
transform=model_axes[0].transAxes, va='center',
fontsize=1.5*mpl.rcParams['font.size'])
axes.append(model_axes)
# this needs to be created after the model plots so we can grab
# their axes
legend_axis = fig.add_subplot(gs[1, -1])
legend_axis.legend(*axes[1][1].get_legend_handles_labels(), loc='center left')
legend_axis.axis('off')
return fig
def _catplot(df, x='subject', y='cv_loss', hue='fit_model_type', height=8, aspect=.9,
ci=68, plot_kind='strip', x_rotate=False, legend='full', orient='v', **kwargs):
"""wrapper around seaborn.catplot
several figures call seaborn.catplot and are pretty similar, so this
function bundles a bunch of the stuff we do:
1. determine the proper order for hue and x
2. determine the proper palette for hue
3. always use np.median as estimator and 'full' legend
4. optionally rotate x-axis labels (and add extra room if so)
5. add a horizontal line at the x-axis if we have both negative and
positive values
Parameters
----------
df : pd.DataFrame
pandas DataFrame
x : str, optional
which column of the df to plot on the x-axis
y : str, optional
which column of the df to plot on the y-axis
hue : str, optional
which column of the df to facet as the hue
height : float, optional
height of each plot facet
aspect : float, optional
aspect ratio of each facet
ci : int, optional
size of the confidence intervals (ignored if plot_kind=='strip')
plot_kind : {'point', 'bar', 'strip', 'swarm', 'box', 'violin', or 'boxen'}, optional
type of plot to make, i.e., sns.catplot's kind argument. see
        that function's docstring for more details. only 'point' and
'strip' are expected, might do strange things otherwise
x_rotate : bool or int, optional
whether to rotate the x-axis labels or not. if True, we rotate
by 25 degrees. if an int, we rotate by that many degrees. if
False, we don't rotate. If labels are rotated, we'll also shift
the bottom of the plot up to avoid cutting off the bottom.
legend : str or bool, optional
the legend arg to pass through to seaborn.catplot, see its
docstrings for more details
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
kwargs :
passed to sns.catplot
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
hue_order = plotting.get_order(hue, col_unique=df[hue].unique())
if 'order' in kwargs.keys():
order = kwargs.pop('order')
else:
order = plotting.get_order(x, col_unique=df[x].unique())
pal = plotting.get_palette(hue, col_unique=df[hue].unique(),
doubleup='doubleup' in x)
if plot_kind == 'strip':
# want the different hues to be in a consistent order on the
# x-axis, which requires this
kwargs.update({'jitter': False, 'dodge': True})
if orient == 'h':
x_copy = x
x = y
y = x_copy
aspect = 1/aspect
kwargs['sharex'] = False
else:
kwargs['sharey'] = False
if 'dodge' not in kwargs.keys():
kwargs['dodge'] = 0
# facetgrid seems to ignore the defaults for these, but we want to use them
# so its consistent with other figures
gridspec_kws = {k: mpl.rcParams[f'figure.subplot.{k}']
for k in ['top', 'bottom', 'left', 'right']}
g = sns.catplot(x, y, hue, data=df, hue_order=hue_order, legend=legend, height=height,
kind=plot_kind, aspect=aspect, order=order, palette=pal, ci=ci,
estimator=np.median, orient=orient, facet_kws={'gridspec_kws': gridspec_kws},
**kwargs)
for ax in g.axes.flatten():
if x_rotate:
if x_rotate is True:
x_rotate = 25
labels = ax.get_xticklabels()
if labels:
ax.set_xticklabels(labels, rotation=x_rotate, ha='right')
if orient == 'v':
if (df[y] < 0).any() and (df[y] > 0).any():
ax.axhline(color='grey', linestyle='dashed')
else:
if (df[x] < 0).any() and (df[x] > 0).any():
ax.axvline(color='grey', linestyle='dashed')
if x_rotate:
if x == 'subject':
g.fig.subplots_adjust(bottom=.15)
else:
g.fig.subplots_adjust(bottom=.2)
return g
def cross_validation_raw(df, seed, noise_ceiling_df=None, orient='v', context='paper'):
"""plot raw cross-validation loss
This does no pre-processing of the df and plots subjects on the
x-axis, model type as hue. (NOTE: this means if there are multiple
scanning sessions for each subject, the plot will combine them,
which is probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
noise_ceiling_df : pd.DataFrame
dataframe containing the results of the noise ceiling analyses
for all subjects (i.e., the output of the
noise_ceiling_monte_carlo_overall rule)
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
np.random.seed(seed)
height = 8
aspect = .9
s = 5
if context == 'poster':
height *= 2
aspect = 1
s *= 2
if noise_ceiling_df is not None:
merge_cols = ['subject', 'mat_type', 'atlas_type', 'session', 'task', 'vareas', 'eccen']
df = pd.merge(df, noise_ceiling_df, 'outer', on=merge_cols, suffixes=['_cv', '_noise'])
    g = _catplot(df.query('loss_func in ["weighted_normed_loss", "normed_loss", "cosine_distance_scaled"]'),
                 legend=False, height=height, aspect=aspect, s=s, x_rotate=True,
                 orient=orient, col='loss_func')
if noise_ceiling_df is not None:
g.map_dataframe(plotting.plot_noise_ceiling, 'subject', 'loss')
g.fig.suptitle("Cross-validated loss across subjects")
if orient == 'v':
g.set(ylabel="Cross-validated loss", xlabel="Subject")
elif orient == 'h':
g.set(xlabel="Cross-validated loss", ylabel="Subject")
g.add_legend()
g._legend.set_title("Model type")
ylims = [(0, .06), (0, .0022), (0, .0022)]
for i, ax in enumerate(g.axes.flatten()):
ax.set(ylim=ylims[i])
return g
def cross_validation_demeaned(df, seed, remeaned=False, orient='v', context='paper'):
"""plot demeaned cross-validation loss
This function demeans the cross-validation loss on a
subject-by-subject basis, then plots subjects on the x-axis, model
type as hue. (NOTE: this means if there are multiple scanning
sessions for each subject, the plot will combine them, which is
probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
remeaned : bool, optional
whether to use the demeaned cross-validation loss or the
remeaned one. Remeaned has the mean across subjects added back
to it, so that there won't be any negative y-values. This will
only affect the values on the y-axis; the relative placements of
the points will all be the same.
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
np.random.seed(seed)
height = 8
aspect = .9
if context == 'poster':
height *= 2
aspect = 1
df = _demean_df(df)
if remeaned:
name = 'remeaned'
else:
name = 'demeaned'
g = _catplot(df, y=f'{name}_cv_loss', height=height, aspect=aspect, x_rotate=True,
orient=orient, col='loss_func')
g.fig.suptitle(f"{name.capitalize()} cross-validated loss across subjects")
if orient == 'v':
g.set(ylabel=f"Cross-validated loss ({name} by subject)", xlabel="Subject")
elif orient == 'h':
g.set(xlabel=f"Cross-validated loss ({name} by subject)", ylabel="Subject")
g._legend.set_title("Model type")
return g
def cross_validation_model(df, seed, plot_kind='strip', remeaned=False, noise_ceiling_df=None,
orient='v', sort=False, doubleup=False, context='paper'):
"""plot demeaned cross-validation loss, as function of model type
This function demeans the cross-validation loss on a
subject-by-subject basis, then plots model type on the x-axis,
subject as hue. (NOTE: this means if there are multiple scanning
sessions for each subject, the plot will combine them, which is
probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
plot_kind : {'strip', 'point'}, optional
whether to create a strip plot (each subject as a separate
point) or a point plot (combine across subjects, plotting the
median and bootstrapped 68% CI)
remeaned : bool, optional
whether to use the demeaned cross-validation loss or the
remeaned one. Remeaned has the mean across subjects added back
to it, so that there won't be any negative y-values. This will
only affect the values on the y-axis; the relative placements of
the points (and the size of the error bars if
`plot_kind='point'`) will all be the same.
noise_ceiling_df : pd.DataFrame
dataframe containing the results of the noise ceiling analyses
for all subjects (i.e., the output of the
noise_ceiling_monte_carlo_overall rule)
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
sort : bool, optional
whether to sort the models by the median loss of the
weighted_normed_loss or show them in numbered order
doubleup : bool, optional
whether to "double-up" models so that we plot two models on the same
row if they're identical except for fitting A3/A4. this then shows the
version fitting A3/A4 as a fainter color of the version that doesn't.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
kwargs = {}
np.random.seed(seed)
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
if doubleup:
height = fig_width * .855
else:
height = fig_width
aspect = 1
if noise_ceiling_df is not None:
merge_cols = ['subject', 'mat_type', 'atlas_type', 'session', 'task', 'vareas', 'eccen']
noise_ceiling_df = noise_ceiling_df.groupby(merge_cols).median().reset_index()
df = pd.merge(df, noise_ceiling_df, 'inner', on=merge_cols, suffixes=['_cv', '_noise'])
extra_cols = ['loss']
else:
extra_cols = []
df = _demean_df(df, extra_cols=extra_cols)
if plot_kind == 'strip':
hue = 'subject'
legend_title = "Subject"
legend = 'full'
elif plot_kind == 'point':
hue = 'fit_model_type'
legend = False
if remeaned:
name = 'remeaned'
else:
name = 'demeaned'
if sort:
gb = df.query("loss_func == 'weighted_normed_loss'").groupby('fit_model_type')
kwargs['order'] = gb[f'{name}_cv_loss'].median().sort_values(ascending=False).index
if doubleup:
df['fit_model_doubleup'] = df.fit_model_type.map(dict(zip(plotting.MODEL_PLOT_ORDER,
plotting.MODEL_PLOT_ORDER_DOUBLEUP)))
x = 'fit_model_doubleup'
if noise_ceiling_df is not None:
nc_map = {k: k for k in range(1, 8)}
nc_map.update({10: 8, 12: 9})
df['fit_model_nc'] = df.fit_model_doubleup.map(nc_map)
else:
x = 'fit_model_type'
if noise_ceiling_df is not None:
df['fit_model_nc'] = df.fit_model_type
g = _catplot(df, x=x, y=f'{name}_cv_loss', hue=hue,
col='loss_func', plot_kind=plot_kind, height=height,
aspect=aspect, orient=orient, legend=legend, **kwargs)
title = f"{name.capitalize()} cross-validated loss across model types"
if noise_ceiling_df is not None:
g.map_dataframe(plotting.plot_noise_ceiling, 'fit_model_nc', f'{name}_loss', ci=0,
orient=orient)
title += "\n Median noise ceiling shown as blue line"
if orient == 'v':
g.set(ylabel=f"Cross-validated loss ({name} by subject)", xlabel="Model type")
elif orient == 'h':
g.set(xlabel=f"Cross-validated loss ({name} by subject)", ylabel="")
# if plot_kind=='point', then there is no legend, so the following
# would cause an error
if plot_kind == 'strip':
g._legend.set_title(legend_title)
# don't want title in the paper version
if context != 'paper':
g.fig.suptitle(title)
else:
if orient == 'h':
# also want to remove the y axis, since it's duplicating the one from
# the other figure
for ax in g.axes.flatten():
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
if plot_kind == 'point':
# this way, the ylims line up whether or not we plotted the
# noise ceiling line
if doubleup:
ax.set_ylim((8.5, -0.5))
else:
ax.set_ylim((13.5, -0.5))
return g
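# Example (illustrative; `cv_df` is the combined cross-validation output and
# `nc_df` the optional noise-ceiling dataframe described in the docstring):
#
#   g = cross_validation_model(cv_df, seed=0, plot_kind='point', orient='h',
#                              sort=True, noise_ceiling_df=nc_df)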
def model_types(context='paper', palette_type='model', annotate=False,
order=None, doubleup=False):
"""Create plot showing which model fits which parameters.
We have 11 different parameters, which might seem like a lot, so we
do cross-validation to determine whether they're all necessary. This
plot shows which parameters are fit by each model, in a little
table.
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
palette_type : {'model', 'simple', 'simple_r', seaborn palette name}, optional
palette to use for this plot. if 'model', the parameter each
model fits is shown in its color (as used in other plots). If
'simple' or 'simple_r', we'll use a white/black colormap with
either black (if 'simple') or white (if 'simple_r') showing the
parameter is fit. Else, should be a str giving a seaborn palette
name, i.e., an arg that can be passed to seaborn.color_palette.
annotate : bool, optional
whether to annotate the schematic with info on the parameter
categories (e.g., period/amplitude, eccentricity/orientation,
etc)
order : pandas index or None, optional
If None, we plot the models in the default order. Else, should be an
        index object that gives the order to plot them in (from top to bottom).
    doubleup : bool, optional
        whether to "double-up" models so that we plot two models on the same
        row if they're identical except for fitting A3/A4.
Returns
-------
fig : plt.Figure
The figure with the plot on it
"""
params, fig_width = style.plotting_style(context, figsize='half')
# these ticks don't add anything and are confusing
params['xtick.bottom'] = False
params['ytick.left'] = False
plt.style.use(params)
figsize = (fig_width, fig_width)
extra_space = 0
model_names = plotting.MODEL_PLOT_ORDER
parameters = plotting.PLOT_PARAM_ORDER
model_variants = np.zeros((len(model_names), len(parameters)))
if palette_type == 'model':
pal = plotting.get_palette('fit_model_type', col_unique=model_names,
doubleup=doubleup)
try:
pal = pal.tolist()
except AttributeError:
# then it's already a list
pass
pal = [(1, 1, 1)] + pal
fill_vals = dict(zip(range(len(model_names)), range(1, len(model_names)+1)))
else:
if palette_type.startswith('simple'):
black, white = [(0, 0, 0), (1, 1, 1)]
if palette_type.endswith('_r'):
pal = [black, white]
else:
pal = [white, black]
else:
pal = sns.color_palette(palette_type, 2)
fill_vals = dict(zip(range(len(model_names)), len(model_names) * [True]))
if not doubleup:
model_variants[0, [0, 2]] = fill_vals[0]
model_variants[1, [0, 1]] = fill_vals[1]
model_variants[2, [0, 1, 2]] = fill_vals[2]
model_variants[3, [0, 1, 2, 3, 4]] = fill_vals[3]
model_variants[4, [0, 1, 2, 5, 6]] = fill_vals[4]
model_variants[5, [0, 1, 2, 3, 4, 5, 6]] = fill_vals[5]
model_variants[6, [0, 1, 2, 7, 8]] = fill_vals[6]
model_variants[7, [0, 1, 2, 9, 10]] = fill_vals[7]
model_variants[8, [0, 1, 2, 7, 8, 9, 10]] = fill_vals[8]
model_variants[9, [0, 1, 2, 3, 4, 7, 8]] = fill_vals[9]
model_variants[10, [0, 1, 2, 5, 6, 9, 10]] = fill_vals[10]
model_variants[11, [0, 1, 2, 3, 4, 5, 6, 7, 8]] = fill_vals[11]
model_variants[12, [0, 1, 2, 3, 4, 5, 6, 9, 10]] = fill_vals[12]
model_variants[13, :] = fill_vals[13]
# while in theory, we want square to be True here too, we messed with
# all the size in such a way that it works with it set to False
square = False
else:
model_variants[0, [0, 2]] = fill_vals[0]
model_variants[1, [0, 1]] = fill_vals[1]
model_variants[2, [0, 1, 2]] = fill_vals[2]
model_variants[3, [0, 1, 2, 3, 4]] = fill_vals[3]
model_variants[4, [0, 1, 2, 5, 6]] = fill_vals[4]
model_variants[5, [0, 1, 2, 3, 4, 5, 6]] = fill_vals[5]
model_variants[6, [0, 1, 2, 7, 8]] = fill_vals[6]
model_variants[2, [9, 10]] = fill_vals[7]
model_variants[6, [9, 10]] = fill_vals[8]
model_variants[9, [0, 1, 2, 3, 4, 7, 8]] = fill_vals[9]
model_variants[4, [9, 10]] = fill_vals[10]
model_variants[11, [0, 1, 2, 3, 4, 5, 6, 7, 8]] = fill_vals[11]
model_variants[5, [9, 10]] = fill_vals[12]
model_variants[11, [9, 10]] = fill_vals[13]
# drop the rows that are all 0s
model_variants = model_variants[~(model_variants==0).all(1)]
warnings.warn("when doubling-up, we just use sequential numbers for models "
"(the numbers therefore have a different meaning than for "
"non-doubled-up version)")
model_names = np.arange(1, model_variants.shape[0]+1)
square = True
model_variants = pd.DataFrame(model_variants, model_names, parameters)
if order is not None:
model_variants = model_variants.reindex(order)
fig = plt.figure(figsize=figsize)
ax = sns.heatmap(model_variants, cmap=pal, cbar=False, square=square)
ax.set_yticklabels(model_variants.index, rotation=0)
ax.set_ylabel("Model type")
# we want the labels on the top here, not the bottom
ax.tick_params(labelbottom=False, labeltop=True, pad=-2)
if annotate:
arrowprops = {'connectionstyle': 'bar', 'arrowstyle': '-', 'color': '0'}
        # short category labels (the full names would be 'Eccentricity',
        # 'Absolute', 'Relative', 'Absolute', 'Relative')
        text = ['Ecc', 'Abs', 'Rel', 'Abs', 'Rel']
for i, pos in enumerate(range(1, 10, 2)):
plotting.draw_arrow(ax, ((pos+.5)/11, 1.08+extra_space),
((pos+1.5)/11, 1.08+extra_space), arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text((pos+1)/11, 1.11+extra_space, text[i], transform=ax.transAxes,
ha='center', va='bottom')
arrowprops['connectionstyle'] = f'bar,fraction={.3/5}'
plotting.draw_arrow(ax, (1.5/11, 1.17+extra_space), (6.5/11, 1.17+extra_space),
arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text(4/11, 1.22+extra_space, 'Period', transform=ax.transAxes,
ha='center', va='bottom')
arrowprops['connectionstyle'] = f'bar,fraction={.3/3}'
plotting.draw_arrow(ax, (7.5/11, 1.17+extra_space), (10.5/11, 1.17+extra_space),
arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text(9/11, 1.22+extra_space, 'Amplitude', transform=ax.transAxes,
ha='center', va='bottom')
return fig
def model_parameters(df, plot_kind='point', visual_field='all', fig=None, add_legend=True,
context='paper', **kwargs):
"""plot model parameter values, across subjects
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot and adds
a column, param_category, which enables us to break up the
figure into three subplots
plot_kind : {'point', 'strip', 'dist'}, optional
What type of plot to make. If 'point' or 'strip', it's assumed
that df contains only the fits to the median data across
bootstraps (thus, one value per subject per parameter); if
'dist', it's assumed that df contains the fits to all bootstraps
(thus, 100 values per subject per parameter). this function
should run if those are not true, but it will look weird:
- 'point': point plot, so show 68% CI across subjects
- 'strip': strip plot, so show each subject as a separate point
        - 'dist': distribution, show each subject as a separate
point with their own 68% CI across bootstraps
visual_field : str, optional
in addition to fitting the model across the whole visual field,
we also fit the model to some portions of it (the left half,
right half, etc). this arg allows us to easily modify the title
of the plot to make it clear which portion of the visual field
we're plotting. If 'all' (the default), we don't modify the
title at all, otherwise we append "in {visual_field} visual
field" to it.
fig : plt.Figure or None, optional
the figure to plot on. If None, we create a new figure. Intended
use case for this is to plot the data from multiple sessions on
the same axes (with different display kwargs), in order to
directly compare how parameter values change.
add_legend : bool, optional
whether to add a legend or not. If True, will add just outside
the right-most axis
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
kwargs :
Passed directly to the plotting function, which depends on the
value of plot_kind
Returns
-------
fig : plt.Figure
        Figure containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
# in order to make the distance between the hues appear roughly
# equivalent, need to set the ax_xlims in a particular way
n_ori_params = df.query("param_category=='orientation'").model_parameter.nunique()
ax_xlims = [[-.5, .5], [-.5, 1.5], [-.5, n_ori_params - .5]]
yticks = [[0, .5, 1, 1.5, 2, 2.5], [0, .1, .2, .3, .4], [-.03, 0, .03, .06, .09]]
axhline = [2]
if fig is None:
fig, axes = plt.subplots(1, 3, figsize=(fig_width, fig_width/2),
gridspec_kw={'width_ratios': [.12, .25, .63],
'wspace': .3})
else:
axes = fig.axes
order = plotting.get_order('model_parameter', col_unique=df.model_parameter.unique())
if plot_kind == 'point':
pal = plotting.get_palette('model_parameter', col_unique=df.model_parameter.unique(),
as_dict=True)
elif plot_kind == 'strip':
# then we're showing this across subjects
if 'subject' in df.columns and df.subject.nunique() > 1:
hue = 'subject'
# this is sub-groupaverage
else:
hue = 'groupaverage_seed'
pal = plotting.get_palette(hue, col_unique=df[hue].unique(), as_dict=True)
hue_order = plotting.get_order(hue, col_unique=df[hue].unique())
elif plot_kind == 'dist':
# then we're showing this across subjects
if 'subject' in df.columns and df.subject.nunique() > 1:
pal = plotting.get_palette('subject', col_unique=df.subject.unique(), as_dict=True)
hue_order = plotting.get_order('subject', col_unique=df.subject.unique())
gb_col = 'subject'
# copied from how seaborn's stripplot handles this, by looking
# at lines 368 and 1190 in categorical.py (version 0.9.0)
dodge = np.linspace(0, .8 - (.8 / df.subject.nunique()), df.subject.nunique())
dodge -= dodge.mean()
yticks = [[0, .5, 1, 1.5, 2, 2.5, 3.0],
[-.1, 0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1],
[-.2, -.1, 0, .1, .2, .3]]
ax_xlims = [[-1, 1], [-1, 2], [-.75, n_ori_params-.5]]
axhline += [1]
# else we've combined across all subjects
else:
pal = plotting.get_palette('model_parameter', col_unique=df.model_parameter.unique(),
as_dict=True)
gb_col = 'model_parameter'
dodge = np.zeros(df.model_parameter.nunique())
for i, ax in enumerate(axes):
cat = ['sigma', 'eccen', 'orientation'][i]
tmp = df.query("param_category==@cat")
ax_order = [i for i in order if i in tmp.model_parameter.unique()]
if plot_kind == 'point':
sns.pointplot('model_parameter', 'fit_value', 'model_parameter', data=tmp,
estimator=np.median, ax=ax, order=ax_order, palette=pal, ci=68, **kwargs)
elif plot_kind == 'strip':
# want to make sure that the different hues end up in the
# same order everytime, which requires doing this with
# jitter and dodge
sns.stripplot('model_parameter', 'fit_value', hue, data=tmp, ax=ax,
order=ax_order, palette=pal, hue_order=hue_order, jitter=False,
dodge=True, **kwargs)
elif plot_kind == 'dist':
handles, labels = [], []
for j, (n, g) in enumerate(tmp.groupby(gb_col)):
dots, _, _ = plotting.scatter_ci_dist('model_parameter', 'fit_value', data=g,
label=n, ax=ax, color=pal[n],
x_dodge=dodge[j], x_order=ax_order, **kwargs)
handles.append(dots)
labels.append(n)
ax.set(xlim=ax_xlims[i], yticks=yticks[i])
ax.tick_params(pad=0)
if ax.legend_:
ax.legend_.remove()
if i == 2:
if add_legend:
if plot_kind == 'dist':
legend = ax.legend(handles, labels, loc='lower center', ncol=3,
borderaxespad=0, frameon=False,
bbox_to_anchor=(.49, -.3), bbox_transform=fig.transFigure)
else:
legend = ax.legend(loc=(1.01, .3), borderaxespad=0, frameon=False)
# explicitly adding the legend artist allows us to add a
# second legend if we want
ax.add_artist(legend)
if i in axhline:
ax.axhline(color='grey', linestyle='dashed')
if i == 0:
ax.set(ylabel='Parameter value')
fig.text(.5, 0, "Parameter", ha='center')
if context != 'paper':
# don't want title in paper context
suptitle = "Model parameters"
if visual_field != 'all':
suptitle += f' in {visual_field} visual field'
fig.suptitle(suptitle)
fig.subplots_adjust(top=.85)
return fig
def model_parameters_pairplot(df, drop_outlier=False):
"""plot pairwise distribution of model parameters
There's one very obvious outlier (sub-wlsubj007, ses-04, bootstrap
41), where the $a$ parameter (sf_ecc_slope) is less than 0 (other
parameters are also weird). If you want to drop that, set
drop_outlier=True
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot
drop_outlier : bool, optional
whether to drop the outlier or not (see above)
Returns
-------
g : sns.PairGrid
the PairGrid containing the plot
"""
pal = plotting.get_palette('subject', col_unique=df.subject.unique())
pal = dict(zip(df.subject.unique(), pal))
df = pd.pivot_table(df, index=['subject', 'bootstrap_num'], columns='model_parameter',
values='fit_value').reset_index()
# this is a real outlier: one subject, one bootstrap (see docstring)
if drop_outlier:
df = df[df.get('$a$') > 0]
g = sns.pairplot(df, hue='subject', vars=plotting.PLOT_PARAM_ORDER, palette=pal)
for ax in g.axes.flatten():
ax.axhline(color='grey', linestyle='dashed')
ax.axvline(color='grey', linestyle='dashed')
return g
def model_parameters_compare_plot(df, bootstrap_df):
"""plot comparison of model parameters from bootstrap vs median fits
we have two different ways of fitting the data: to all of the
bootstraps or just to the median across bootstraps. if we compare
the resulting parameter values, they shouldn't be that different,
which is what we do here.
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot
bootstrap_df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects and bootstraps. note that this should first have gone
through prep_model_df, which renames the values of the
model_parameter columns so they're more pleasant to look at on
the plot
Returns
-------
g : sns.FacetGrid
the FacetGrid containing the plot
"""
pal = plotting.get_palette('subject', col_unique=df.subject.unique(), as_dict=True)
order = plotting.get_order('subject', col_unique=df.subject.unique())
compare_cols = ['model_parameter', 'subject', 'session', 'task']
compare_df = df[compare_cols + ['fit_value']]
tmp = bootstrap_df[compare_cols + ['fit_value']].rename(columns={'fit_value': 'fit_value_bs'})
compare_df = pd.merge(tmp, compare_df, on=compare_cols)
compare_df = compare_df.sort_values(compare_cols)
g = sns.FacetGrid(compare_df, col='model_parameter', hue='subject', col_wrap=4, sharey=False,
aspect=2.5, height=3, col_order=plotting.PLOT_PARAM_ORDER, hue_order=order,
palette=pal)
g.map_dataframe(plotting.scatter_ci_dist, 'subject', 'fit_value_bs')
g.map_dataframe(plt.scatter, 'subject', 'fit_value')
for ax in g.axes.flatten():
ax.set_xticklabels(ax.get_xticklabels(), rotation=25, ha='right')
return g
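# Illustrative sketch (not part of the original module): how the merge above
# pairs each bootstrap fit with the single median fit for the same
# parameter/subject/session/task combination. The values are invented.
def _demo_compare_merge():
    import pandas as pd
    keys = ['model_parameter', 'subject', 'session', 'task']
    median_fit = pd.DataFrame({'model_parameter': ['$a$'], 'subject': ['sub-01'],
                               'session': ['ses-01'], 'task': ['task-sf'],
                               'fit_value': [0.12]})
    bootstrap_fits = pd.DataFrame({'model_parameter': ['$a$'] * 3,
                                   'subject': ['sub-01'] * 3,
                                   'session': ['ses-01'] * 3,
                                   'task': ['task-sf'] * 3,
                                   'fit_value_bs': [0.10, 0.13, 0.11]})
    # every bootstrap row ends up with both fit_value_bs and the repeated fit_value
    return pd.merge(bootstrap_fits, median_fit, on=keys)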
def training_loss_check(df, hue='test_subset', thresh=.2):
"""check last epoch training loss
in order to check that one of the models didn't get stuck in a local
optimum in, e.g., one of the cross-validation folds or bootstraps,
we here plot the loss for each subject and model, with median and
68% CI across batches. they should hopefully look basically all the
same
Parameters
----------
df : pd.DataFrame
dataframe with the last epoch loss, as created by
`analyze_model.collect_final_loss`
hue : str, optional
which df column to use as the hue arg for the FacetGrid
thresh : float, optional
the loss threshold for getting stuck in local optima. we
annotate the plot with any training sessions whose median
training loss on the last epoch is above this value
Returns
-------
g : sns.FacetGrid
the FacetGrid containing the plot
"""
# to make sure we show the full dataframe below, from
# https://stackoverflow.com/a/42293737
    pd.set_option('display.max_columns', None)
import pathlib
import pandas as pd
import numpy as np
from typing import TypedDict
from .. import TacoProject
from ..utils import NoClonesError
class ConjugationProject():
"""Holds project data"""
def __init__(self, taco_project:TacoProject):
self._project_name = taco_project.project_name + '_conjugation'
self.tp = taco_project
self.constructs = list()
@property
def project_name(self) -> str:
"""Contains the name of the project."""
return self._project_name
def generate_conjugation_template(self, filename:str=None, number_of_plates:int=1, pcr_positive:bool=True, seq_positive:bool=False):
""" Generates an Excel file to be used as template for a conjugation run.
The function selects at least one positive clone per construct and fills up to run capacity.
Args:
filename (str): Filename for the exported template. If None, project name + '_template.xlsx' will be used.
number_of_plates (int): How many conjugation plates (96 well) should be prepared.
pcr_positive (bool): Only take clones with a positive colony PCR result.
seq_positive (bool): Only take clones with a positive sequencing result.
"""
if filename == None:
_filename = pathlib.Path(f'{self.project_name}_template.xlsx')
else:
_filename = pathlib.Path(filename)
plate = np.concatenate([np.repeat(i+1, 96) for i in range(number_of_plates)])
pos = np.concatenate([np.array([f'{l}{n}' for n in range(1,13) for l in 'ABCDEFGH']) for i in range(number_of_plates)])
inputdata = self.tp.get_validated_clones(pcr=pcr_positive, seq=seq_positive)
n = 1
constructs = []
clones = []
while len(constructs) < number_of_plates*96:
subset = inputdata.groupby('construct').nth(n)
if len(subset) == 0:
n = 1
else:
constructs += list(subset.index)
clones += list(subset.loc[:, 'clone'])
n += 1
template = pd.DataFrame(
[constructs[0:number_of_plates*96],
clones[0:number_of_plates*96],
plate, pos, np.repeat(np.nan, number_of_plates*96)],
).T
        template.columns = pd.Index(['construct', 'clone', 'conjugation_plate', 'conjugation_position', 'number_of_clones'])
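# Illustrative sketch (not part of the original class): what the plate/position
# grid built above looks like for a single 96-well plate. Positions run
# column-wise (A1, B1, ..., H1, A2, ...), one entry per well.
def _demo_well_positions():
    import numpy as np
    positions = np.array([f'{l}{n}' for n in range(1, 13) for l in 'ABCDEFGH'])
    plate = np.repeat(1, 96)
    return list(zip(plate, positions))[:8]  # [(1, 'A1'), (1, 'B1'), ..., (1, 'H1')]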
#!/usr/bin/env python
# coding: utf-8
__author__ = "<NAME>"
"""
Dash server for visualizing the decision boundary of a DenseNet (or a general CNN with adaptation) classifier.
Several parts regarding the DB handling are adapted from <NAME>, github.com/choosehappy
"""
# In[5]:
import re
import colorsys
import matplotlib.cm
import argparse
import flask
import umap
import tables
import numpy as np
import pandas as pd
from textwrap import dedent as d
from pathlib import Path
# import jupyterlab_dash
from sklearn.metrics import confusion_matrix
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision.models import DenseNet
from torch.utils.data.dataloader import default_collate
import albumentations as albmt
from albumentations.pytorch import ToTensor
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
parser = argparse.ArgumentParser(description='Run a server for visualization of a CNN classifier')
parser.add_argument('--load_from_file', '-l', action='store_true', default=False, help='Load the embedding from a csv file. Does not compute the embedding',)
parser.add_argument('--target_class', '-t', default=None, help='Target Label, if the classifier was trained in one vs all fashion',)
parser.add_argument('--port', '-p', help='Server Port', default=8050, type = int)
parser.add_argument('database', help='Database containing image patches, labels ...',)
parser.add_argument('filename', help='Creates a csv file of the embedding')
parser.add_argument('model', help='Saved torch model dict, and architecture')
arguments = parser.parse_args()
file_name = arguments.filename
use_existing = arguments.load_from_file
target_class = arguments.target_class
use_port = arguments.port
db_path = arguments.database
model_path = arguments.model
batch_size = 32
patch_size = 224
server = flask.Flask(__name__)
app = dash.Dash(__name__, server=server)
# depending on how many colors are needed, take either the tab10 or the tab20 palette
def color_pallete(n):
    num = int(min(np.ceil(n / 10), 2) * 10)
colors = matplotlib.cm.get_cmap(f'tab{num}').colors
if n > 20:
return ['#%02x%02x%02x' % tuple(
np.array(np.array(colorsys.hsv_to_rgb(i,0.613,246 ))*255,
dtype=np.uint8)) for i in np.linspace(0, 1, n+1)][:-1]
return ['#%02x%02x%02x' % tuple(np.array(np.array(i) * 255,dtype=np.uint8)) for i in colors]
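# Illustrative sketch (not part of the original app): for up to 20 classes the
# helper above draws from a matplotlib tab palette; for more than 20 it falls
# back to evenly spaced HSV hues, so every class still gets a distinct hex color.
def _demo_color_pallete():
    few = color_pallete(5)    # tab palette colors
    many = color_pallete(25)  # 25 distinct hex strings from the HSV fallback
    return few, many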
class Dataset(object):
"Dabase handler for torch.utils.DataLoader written by <NAME>"
def __init__(self, fname, img_transform=None):
self.fname = fname
self.img_transform = img_transform
with tables.open_file(self.fname, 'r') as db:
self.nitems = db.root.imgs.shape[0]
self.imgs = None
self.filenames = None
self.label = None
def __getitem__(self, index):
        # opening should be done in __init__, but that causes issues with
        # multithreading, so we open here instead; it must be reopened every time, otherwise hdf5 crashes
with tables.open_file(self.fname, 'r') as db:
self.imgs = db.root.imgs
self.filenames = db.root.filenames
self.label = db.root.labels
# get the requested image and mask from the pytable
img = self.imgs[index, :, :, :]
fname = self.filenames[index]
label = self.label[index]
img_new = img
if self.img_transform:
img_new = self.img_transform(image=img)['image']
return img_new, img, label, fname
def __len__(self):
return self.nitems
# In[7]:
def get_dataloader(batch_size, patch_size, db_path):
# +
def id_collate(batch):
new_batch = []
ids = []
for _batch in batch:
new_batch.append(_batch[:-1])
ids.append(_batch[-1])
return default_collate(new_batch), ids
# +
img_transform = albmt.Compose([
albmt.RandomSizedCrop((patch_size, patch_size), patch_size, patch_size),
ToTensor()
])
if db_path[0] != '/':
db_path = f'./{db_path}'
    # more workers do not seem to improve performance
dataset = Dataset(db_path, img_transform=img_transform)
dataLoader = DataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=0,
pin_memory=True, collate_fn=id_collate)
print(f"dataset size:\t{len(dataset)}")
# -
return dataLoader, dataset
def load_model(model_path):
device = torch.device('cuda')
checkpoint = torch.load(
model_path, map_location=lambda storage, loc: storage)
# load checkpoint to CPU and then put to device https://discuss.pytorch.org/t/saving-and-loading-torch-models-on-2-machines-with-different-number-of-gpu-devices/6666
model = DenseNet(growth_rate=checkpoint["growth_rate"],
block_config=checkpoint["block_config"],
num_init_features=checkpoint["num_init_features"],
bn_size=checkpoint["bn_size"],
drop_rate=checkpoint["drop_rate"],
num_classes=checkpoint["num_classes"]).to(device)
model.load_state_dict(checkpoint["model_dict"])
print(
f"total params: \t{sum([np.prod(p.size()) for p in model.parameters()])}")
model.eval()
return model, device
def load_embedding(dataLoader, model, device):
out = {}
def hook(module, input, output):
out[module] = input[0]
# works for torchvision.models.DenseNet, register_forward_hook on last layer before classifier.
model.classifier.register_forward_hook(hook)
# +
# all_preds=[]
all_last_layer = []
all_fnames = []
all_labels = []
all_predictions = []
# cmatrix = np.zeros((checkpoint['num_classes'], checkpoint['num_classes']))
    # add notification stuff? (X, xorig, label), fname = next(iter(dataLoader[phase]))
for (X, xorig, label), fname in dataLoader:
X = X.to(device)
label_pred = model(X)
last_layer = out[model.classifier].detach().cpu().numpy()
all_last_layer.append(last_layer)
# yflat = label.numpy() == target_class
all_labels.extend(label.numpy())
pred_class = np.argmax(label_pred.detach().cpu().numpy(), axis=1)
all_predictions.extend(pred_class)
all_fnames.extend([Path(fn.decode()).name for fn in fname])
# cmatrix = cmatrix + \
# confusion_matrix(yflat, pred_class, labels=range(
# checkpoint['num_classes']))
# print(cmatrix)
# acc = (cmatrix/cmatrix.sum()).trace()
# print(acc)
features_hists = np.vstack(all_last_layer)
# -
# +
reducer = umap.UMAP(n_neighbors=50, min_dist=0.0, n_components=3)
embedding = reducer.fit_transform(features_hists)
return embedding, all_labels, all_predictions, dataset, all_fnames
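# Illustrative sketch (not part of the original app): a minimal version of the
# forward-hook pattern used in load_embedding. The hook stores the *input* of
# the last Linear layer, i.e. the penultimate feature vector, while the normal
# forward pass still returns the class scores. The toy network is made up.
def _demo_forward_hook():
    captured = {}
    net = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
    def hook(module, inputs, output):
        captured['features'] = inputs[0].detach()
    net[-1].register_forward_hook(hook)
    scores = net(torch.randn(3, 8))
    return scores.shape, captured['features'].shape  # (3, 2) and (3, 4)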
def create_confusion_map(embedding_a, target_class):
# n_classes = len(embedding_a.Prediction.unique())
pred = embedding_a.Prediction.values
label = embedding_a.Label.values
label = (label)
label = np.array(label, dtype=np.uint8)
conf = [f'{label[i]}{pred[i]}' for i in range(len(label))]
return embedding_a.assign(Confusion=conf)
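# Illustrative sketch (not part of the original app): the Confusion column built
# above is just "<true label><predicted label>" as a string, so equal digits mark
# correct predictions. The labels and predictions here are invented.
def _demo_confusion_labels():
    labels = [0, 1, 1]
    preds = [0, 0, 1]
    return [f'{labels[i]}{preds[i]}' for i in range(len(labels))]  # ['00', '10', '11']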
text_style = dict(color='#444', fontFamily='sans-serif', fontWeight=300)
dataLoader, dataset = get_dataloader(batch_size, patch_size, db_path)
if use_existing is True:
embedding_a = pd.read_csv(file_name)
else:
    # the model is not assigned to a variable so the garbage collector can free it once it is no longer needed
embedding, all_labels, all_predictions, dataset, fnames = load_embedding(
dataLoader, *load_model(model_path))
embedding_a = pd.DataFrame({"x": embedding[:, 0],
"y": embedding[:, 1],
"z": embedding[:, 2],
"Label": all_labels,
"Prediction": all_predictions,
"index": [*range(len(all_labels))],
"Slide": [i[:i.find(re.findall('[A-Za-z\.\s\_]*$', i)[0])] for i in fnames]})
embedding_a.to_csv(file_name)
embedding_a = create_confusion_map(embedding_a, target_class)
def plotly_figure(value, plot_type='2D'):
colors = color_pallete(len(embedding_a[value].unique()))
label_to_type = {'2D': 'scattergl', '3D': 'scatter3d'}
type_to_size = {'2D': 15, '3D': 2.5}
linesize = {'2D': 0.5, '3D': 0}
return {
'data': [dict(
x=embedding_a[embedding_a[value] == target]['x'],
y=embedding_a[embedding_a[value] == target]['y'],
z=embedding_a[embedding_a[value] == target]['z'],
text=embedding_a[embedding_a[value] == target]['index'],
index=embedding_a[embedding_a[value] == target]['index'],
customdata=embedding_a[embedding_a[value] == target]['index'],
mode='markers',
type=label_to_type[plot_type],
name=f'{target}',
marker={
'size': type_to_size[plot_type],
'opacity': 0.5,
'color': colors[i],
'line': {'width': linesize[plot_type], 'color': 'white'}
}
) for i, target in enumerate(sorted(embedding_a[value].unique()))],
'layout': dict(
xaxis={
'title': "x",
'type': 'linear'
},
yaxis={
'title': "y",
'type': 'linear'
},
margin={'l': 40, 'b': 30, 't': 10, 'r': 0},
height=750,
width=850,
hovermode='closest',
clickmode='event+select',
uirevision='no reset of zoom',
legend={'itemsizing': 'constant'}
)
}
app.layout = html.Div([
html.H2('CNN Classification Viewer', style=text_style),
# dcc.Input(id='predictor', placeholder='box', value=''),
html.Div([
html.Div([
dcc.RadioItems(
id='color-plot1',
options=[{'label': i, 'value': i}
for i in ['Label', 'Prediction', 'Confusion', 'Slide']],
value='Label',
labelStyle={}
),
dcc.RadioItems(
id='plot-type',
options=[{'label': i, 'value': i}
for i in ['2D', '3D']],
value='2D',)], style={'width': '49%', 'display': 'inline'}), # , 'float': 'left', 'display': 'inline-block'}),
html.Div([
dcc.Graph(id='plot1', figure=plotly_figure('Label'))
], style={'float': 'left', 'display': 'inline-block'}),
html.Div([
html.Div([html.Img(id='image', width=patch_size, height=patch_size)], style={'display': 'inline-block'}),
dcc.Markdown(d("""
**Image Properties**
""")),
html.Pre(id='hover-data'),
dcc.Markdown(d("""
**Frequency in selected**
""")),
html.Pre(id='selected-data')
], style={'float': 'left', 'display': 'inline-block'}, className='three columns'),
])], style={'width': '65%'})
@app.callback(
Output('selected-data', 'children'),
[Input('plot1', 'selectedData')])
def display_selected_data(selectedData):
text = ""
if selectedData is not None:
indices = pd.DataFrame.from_dict(selectedData['points'])['customdata'].values
frame = embedding_a[embedding_a['index'].isin(indices)]
conf_freq = frame['Confusion'].value_counts()
        df2 = pd.DataFrame({'Confusion': conf_freq.index, 'Label': conf_freq.values})
# coding: utf-8
# In[ ]:
import pandas as pd
import nsepy as ns
from datetime import date
import math
# In[ ]:
stocks = pd.read_csv("stocklist.csv")
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
import numpy as np
from .dataframe import DataFrame
from .utils import _reindex_helper
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True):
if keys is not None:
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError("No objects to concatenate")
objs = [obj for obj in objs if obj is not None]
if len(objs) == 0:
raise ValueError("All objects passed were None")
try:
type_check = next(obj for obj in objs
if not isinstance(obj, (pandas.Series,
pandas.DataFrame,
DataFrame)))
except StopIteration:
type_check = None
if type_check is not None:
raise ValueError("cannot concatenate object of type \"{0}\"; only "
"pandas.Series, pandas.DataFrame, "
"and modin.pandas.DataFrame objs are "
"valid", type(type_check))
all_series = all(isinstance(obj, pandas.Series)
for obj in objs)
if all_series:
return DataFrame(pandas.concat(objs, axis, join, join_axes,
ignore_index, keys, levels, names,
verify_integrity, copy))
if isinstance(objs, dict):
raise NotImplementedError(
"Obj as dicts not implemented. To contribute to "
"Pandas on Ray, please visit github.com/ray-project/ray.")
    axis = pandas.DataFrame()
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
result['index'].dtype == 'M8[ns]'
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
import os
import sys
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "prs_project.settings")
import django
django.setup()
import pickle
import logging
import pandas as pd
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from analytics.models import Rating
from builder.item_similarity_calculator import ItemSimilarityMatrixBuilder
from builder.lda_model_calculator import LdaModel
from recs.content_based_recommender import ContentBasedRecs
from recs.neighborhood_based_recommender import NeighborhoodBasedRecs
from recs.fwls_recommender import FeatureWeightedLinearStacking
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
class FWLSCalculator(object):
def __init__(self, save_path, data_size=1000):
self.save_path = save_path
self.logger = logging.getLogger('FWLS')
self.train_data = None
self.test_data = None
self.rating_count = None
self.cb = ContentBasedRecs()
self.cf = NeighborhoodBasedRecs()
self.fwls = FeatureWeightedLinearStacking()
self.data_size = data_size
def get_real_training_data(self):
columns = ['user_id', 'movie_id', 'rating', 'type']
ratings_data = Rating.objects.all().values(*columns)[:self.data_size]
df = pd.DataFrame.from_records(ratings_data, columns=columns)
import pandas as pd
from modules import tqdm
import argparse
import codecs
import os
def conll2003_preprocess(
data_dir, train_name="eng.train", dev_name="eng.testa", test_name="eng.testb"):
train_f = read_data(os.path.join(data_dir, train_name))
dev_f = read_data(os.path.join(data_dir, dev_name))
test_f = read_data(os.path.join(data_dir, test_name))
train = pd.DataFrame({"labels": [x[0] for x in train_f], "text": [x[1] for x in train_f]})
train["cls"] = train["labels"].apply(lambda x: all([y.split("_")[0] == "O" for y in x.split()]))
train.to_csv(os.path.join(data_dir, "{}.train.csv".format(train_name)), index=False, sep="\t")
dev = pd.DataFrame({"labels": [x[0] for x in dev_f], "text": [x[1] for x in dev_f]})
__author__ = 'thorwhalen'
"""
Includes various AdWords element diagnosis functions.
"""
#from ut.util.var import my_to_list as to_list, my_to_list
from numpy.lib import arraysetops
from numpy import array
from numpy import argmax
import pandas as pd
from ut.util.ulist import ascertain_list
import ut.util.var as util_var
from operator import eq, lt, le, gt, ge
operator_strings = dict()
operator_strings[eq] = 'are equal'
operator_strings[lt] = 'are lower than'
operator_strings[le] = 'are at most'
operator_strings[gt] = 'are greater than'
operator_strings[ge] = 'are at least'
operator_sym = dict()
operator_sym[eq] = '='
operator_sym[lt] = '<'
operator_sym[le] = '<='
operator_sym[gt] = '>'
operator_sym[ge] = '>='
def diag_df(df):
df = df.reset_index(drop=True) # added this 150613 because problems with obj and str indices
cols = df.columns
t = list()
for c in cols:
lidx = df[c].notnull()
x = df[c].iloc[argmax(lidx)]
if x == '':
x = df[c].iloc[argmax(lidx & (array(df[c]) != ''))]
item = {'column': c,
'type': type(x).__name__,
'non_null_value': x}
try:
item['num_uniques'] = df[c].nunique()
except Exception:
item['num_uniques'] = None
try:
item['num_nonzero'] = len(df[c].nonzero()[0])
except Exception:
item['num_nonzero'] = None
try:
item['num_nonnan'] = len(df[c].dropna())
except Exception:
item['num_nonnan'] = None
t.append(item)
return pd.DataFrame(t)
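# Illustrative sketch (added for clarity, not part of the original module):
# diag_df summarizes each column's dtype, a sample non-null value, and
# unique/non-zero/non-NaN counts; the toy frame below is entirely made up.
def _example_diag_df_sketch():
    'Toy demo of diag_df on a small made-up frame.'
    toy = pd.DataFrame({'keyword': ['shoes', 'boots', ''],
                        'clicks': [10, 0, 3],
                        'cost': [1.5, None, 0.7]})
    return diag_df(toy)  # one summary row per column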
from json import load
from matplotlib.pyplot import title
from database.database import DbClient
from discord import Embed
import pandas as pd
from util.data import load_data
class Analytics:
def __init__(self, server_id: str, db):
self.server_id = server_id
self.db = db
@staticmethod
def no_data_embed(topic: str) -> Embed:
"""CREATE AN EMBED IF NO DATA WAS COLLECTED"""
embed = Embed(title="SORRY", description=f"Sorry, but there were no `{topic}` data collected on this server!")
return embed
async def analyze_message(self):
"""ANALYZE THE MESSAGE DATA"""
data = await load_data(self.db, self.server_id)
data = data["message"]
if len(data) == 0:
return self.no_data_embed("message")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
channelid_counts = pd.value_counts(df["channelid"])
role_counts = pd.value_counts(df["roles"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Message counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_message_delete(self):
"""ANALYZE MESSAGE DELETE"""
data = await load_data(self.db, self.server_id)
data = data["message_delete"]
if len(data) == 0:
return self.no_data_embed("message delete")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = | pd.to_datetime(df["timestamp"]) | pandas.to_datetime |
'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, math_util, util, viz
from slm_lab.spec import spec_util
import numpy as np
import os
import pandas as pd
import pydash as ps
import regex as re
import shutil
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with any reward mean
FITNESS_STD = util.read('slm_lab/spec/_fitness_std.json')
NOISE_WINDOW = 0.05
NORM_ORDER = 1 # use L1 norm in fitness vector norm
MA_WINDOW = 100
logger = logger.get_logger(__name__)
'''
Fitness analysis
'''
def calc_strength_sr(aeb_df, rand_reward, std_reward):
'''
Calculate strength for each reward as
strength = (reward - rand_reward) / (std_reward - rand_reward)
'''
return (aeb_df['reward'] - rand_reward) / (std_reward - rand_reward)
def calc_strength(aeb_df):
'''
Strength of an agent in fitness is its maximum strength_ma. Moving average is used to denoise signal.
For an agent's total reward at a given time, calculate strength by normalizing it with a given baseline rand_reward and solution std_reward, i.e.
strength = (reward - rand_reward) / (std_reward - rand_reward)
**Properties:**
- random agent has strength 0, standard agent has strength 1.
- strength is standardized to be independent of the actual sign and scale of raw reward
- scales relative to std_reward: if an agent achieves 2x std_reward, the strength is 2x, and so on.
This allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties.
'''
return aeb_df['strength_ma'].max()
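# Illustrative sketch (added for clarity, not part of the original pipeline):
# shows the strength normalization on a toy reward series, assuming a random
# baseline reward of 0 and a standard (solution) reward of 200.
def _example_strength_sketch():
    '''Toy demo of calc_strength_sr/calc_strength; all values are made up.'''
    toy_df = pd.DataFrame({'reward': [0., 50., 100., 200., 300.]})
    toy_df['strength'] = calc_strength_sr(toy_df, rand_reward=0., std_reward=200.)
    # denoise with the same moving-average window used elsewhere in this module
    toy_df['strength_ma'] = toy_df['strength'].rolling(MA_WINDOW, min_periods=0, center=False).mean()
    return calc_strength(toy_df)  # max of the moving average, here 0.65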
def calc_speed(aeb_df, std_timestep):
'''
Find the maximum strength_ma, and the time to first reach it. Then the strength/time divided by the standard std_strength/std_timestep is speed, i.e.
speed = (max_strength_ma / timestep_to_first_reach) / (std_strength / std_timestep)
**Properties:**
- random agent has speed 0, standard agent has speed 1.
- if both agents reach the same max strength_ma, and one reaches it in half the timesteps, it is twice as fast.
- speed is standardized regardless of the scaling of absolute timesteps, or even the max strength attained
This allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem.
'''
first_max_idx = aeb_df['strength_ma'].idxmax() # this returns the first max
max_row = aeb_df.loc[first_max_idx]
std_strength = 1.
if max_row['total_t'] == 0: # especially for random agent
speed = 0.
else:
speed = (max_row['strength_ma'] / max_row['total_t']) / (std_strength / std_timestep)
return speed
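# Illustrative sketch (added for clarity, not part of the original pipeline):
# a toy frame where the max strength_ma (1.0) is first reached at total_t=500;
# with std_timestep=1000 this gives speed = (1.0 / 500) / (1.0 / 1000) = 2.0.
def _example_speed_sketch():
    '''Toy demo of calc_speed; all values are made up.'''
    toy_df = pd.DataFrame({'strength_ma': [0.2, 0.6, 1.0, 1.0],
                           'total_t': [100, 300, 500, 700]})
    return calc_speed(toy_df, std_timestep=1000)  # 2.0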
def calc_stability(aeb_df):
'''
Stability = fraction of monotonically increasing elements in the denoised series of strength_ma, or 0 if strength_ma is all <= 0.
**Properties:**
- stable agent has value 1, unstable agent < 1, and non-solution = 0.
- uses strength_ma to be more robust to noise
- sharp gain in strength is considered stable
- monotonically increasing implies strength can keep growing; as long as it does not fall much, it is considered stable
'''
if (aeb_df['strength_ma'].values <= 0.).all():
stability = 0.
else:
mono_inc_sr = np.diff(aeb_df['strength_ma']) >= 0.
stability = mono_inc_sr.sum() / mono_inc_sr.size
return stability
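# Illustrative sketch (added for clarity, not part of the original pipeline):
# three of the four consecutive differences are non-negative, so stability = 0.75.
def _example_stability_sketch():
    '''Toy demo of calc_stability; all values are made up.'''
    toy_df = pd.DataFrame({'strength_ma': [0.1, 0.2, 0.15, 0.3, 0.4]})
    return calc_stability(toy_df)  # 0.75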
def calc_consistency(aeb_fitness_df):
'''
Calculate the consistency of a trial from the fitness_vectors of its sessions:
consistency = ratio of non-outlier vectors
**Properties:**
- outliers are calculated using MAD modified z-score
- if all the fitness vectors are zero or all strength are zero, consistency = 0
- works for all sorts of session fitness vectors, with the standard scale
When an agent fails to achieve standard strength, measuring consistency would be meaningless and could be misleading, so consistency is set to 0.
'''
fitness_vecs = aeb_fitness_df.values
if ~np.any(fitness_vecs) or ~np.any(aeb_fitness_df['strength']):
# no consistency if vectors all 0
consistency = 0.
elif len(fitness_vecs) == 2:
# if only has 2 vectors, check norm_diff
diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0), NORM_ORDER) / np.linalg.norm(np.ones(len(fitness_vecs[0])), NORM_ORDER)
consistency = diff_norm <= NOISE_WINDOW
else:
is_outlier_arr = math_util.is_outlier(fitness_vecs)
consistency = (~is_outlier_arr).sum() / len(is_outlier_arr)
return consistency
def calc_epi_reward_ma(aeb_df, ckpt=None):
'''Calculates the episode reward moving average with the MA_WINDOW'''
rewards = aeb_df['reward']
if ckpt == 'eval':
# online eval mode reward is reward_ma from avg
aeb_df['reward_ma'] = rewards
else:
aeb_df['reward_ma'] = rewards.rolling(window=MA_WINDOW, min_periods=0, center=False).mean()
return aeb_df
def calc_fitness(fitness_vec):
'''
Takes a vector of qualifying standardized dimensions of fitness and computes the normalized length as fitness.
Uses the L1 norm for simplicity and the intuitiveness of linearity.
'''
if isinstance(fitness_vec, pd.Series):
fitness_vec = fitness_vec.values
elif isinstance(fitness_vec, pd.DataFrame):
fitness_vec = fitness_vec.iloc[0].values
std_fitness_vector = np.ones(len(fitness_vec))
fitness = np.linalg.norm(fitness_vec, NORM_ORDER) / np.linalg.norm(std_fitness_vector, NORM_ORDER)
return fitness
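# Illustrative sketch (added for clarity, not part of the original pipeline):
# with NORM_ORDER=1 the fitness of a non-negative vector is simply the mean of
# its components, e.g. [1.0, 2.0, 0.5, 0.5] -> (1.0 + 2.0 + 0.5 + 0.5) / 4 = 1.0.
def _example_fitness_sketch():
    '''Toy demo of calc_fitness; all values are made up.'''
    toy_vec = pd.Series({'strength': 1.0, 'speed': 2.0, 'stability': 0.5, 'consistency': 0.5})
    return calc_fitness(toy_vec)  # 1.0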
def calc_aeb_fitness_sr(aeb_df, env_name):
'''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
std = FITNESS_STD.get(env_name)
if std is None:
std = FITNESS_STD.get('template')
logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
# calculate the strength sr and the moving-average (to denoise) first before calculating fitness
aeb_df['strength'] = calc_strength_sr(aeb_df, std['rand_epi_reward'], std['std_epi_reward'])
aeb_df['strength_ma'] = aeb_df['strength'].rolling(MA_WINDOW, min_periods=0, center=False).mean()
strength = calc_strength(aeb_df)
speed = calc_speed(aeb_df, std['std_timestep'])
stability = calc_stability(aeb_df)
aeb_fitness_sr = pd.Series({
'strength': strength, 'speed': speed, 'stability': stability})
return aeb_fitness_sr
'''
Checkpoint and early termination analysis
'''
def get_reward_mas(agent, name='eval_reward_ma'):
'''Return array of the named reward_ma for all of an agent's bodies.'''
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
return np.array([getattr(body, name) for body in bodies], dtype=np.float16)
def get_std_epi_rewards(agent):
'''Return array of std_epi_reward for each of the environments.'''
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
return np.array([ps.get(FITNESS_STD, f'{body.env.name}.std_epi_reward') for body in bodies], dtype=np.float16)
def new_best(agent):
'''Check if algorithm is now the new best result, then update the new best'''
best_reward_mas = get_reward_mas(agent, 'best_reward_ma')
eval_reward_mas = get_reward_mas(agent, 'eval_reward_ma')
best = (eval_reward_mas >= best_reward_mas).all()
if best:
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
for body in bodies:
body.best_reward_ma = body.eval_reward_ma
return best
def all_solved(agent):
'''Check if envs have all been solved using std from slm_lab/spec/_fitness_std.json'''
eval_reward_mas = get_reward_mas(agent, 'eval_reward_ma')
std_epi_rewards = get_std_epi_rewards(agent)
solved = (
not np.isnan(std_epi_rewards).any() and
(eval_reward_mas >= std_epi_rewards).all()
)
return solved
def is_unfit(fitness_df, session):
'''Check if a fitness_df is unfit. Used to determine of trial should stop running more sessions'''
if FITNESS_STD.get(session.spec['env'][0]['name']) is None:
return False # fitness not known
mean_fitness_df = calc_mean_fitness(fitness_df)
return mean_fitness_df['strength'].iloc[0] <= NOISE_WINDOW
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
'''Save spec to proper path. Called at Experiment or Trial init.'''
prepath = util.get_prepath(spec, info_space, unit)
util.write(spec, f'{prepath}_spec.json')
def calc_mean_fitness(fitness_df):
'''Method to calculated mean over all bodies for a fitness_df'''
return fitness_df.mean(axis=1, level=3)
def get_session_data(session, body_df_kind='eval', tmp_space_session_sub=False):
'''
Gather data from session from all the bodies
Depending on body_df_kind, will use eval_df or train_df
'''
session_data = {}
for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
aeb_df = body.eval_df if body_df_kind == 'eval' else body.train_df
# TODO tmp substitution since SpaceSession does not have run_eval_episode yet
if tmp_space_session_sub:
aeb_df = body.train_df
session_data[aeb] = aeb_df.copy()
return session_data
def calc_session_fitness_df(session, session_data):
'''Calculate the session fitness df'''
session_fitness_data = {}
for aeb in session_data:
aeb_df = session_data[aeb]
aeb_df = calc_epi_reward_ma(aeb_df, ps.get(session.info_space, 'ckpt'))
util.downcast_float32(aeb_df)
body = session.aeb_space.body_space.data[aeb]
aeb_fitness_sr = calc_aeb_fitness_sr(aeb_df, body.env.name)
aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[session.index])
aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS[:3], axis=1)
session_fitness_data[aeb] = aeb_fitness_df
# form multi_index df, then take mean across all bodies
session_fitness_df = pd.concat(session_fitness_data, axis=1)
from sodapy import Socrata
import geopandas
import pandas as pd
from dateutil.relativedelta import relativedelta
from datetime import timedelta, date
import numpy as np
from flask import Flask, send_from_directory
import csv
import json
#Directory for data files
ASSET_DIR = './Asset'
app = Flask(__name__, static_url_path='', static_folder='D3_Visualization')
with open(ASSET_DIR + '/wards.geojson', 'r') as f:
wardsDict = json.load(f)
#Home endpoint
@app.route('/')
def home():
return app.send_static_file('index.html')
#Border of wards endpoint
@app.route('/wards')
def getWards():
return wardsDict
#Crime endpoint
@app.route('/crimes')
def getCrimes():
dict = getUpdatedCrimeData()
#print(dict)
return dict
def getUpdatedCrimeData():
# Unauthenticated client only works with public data sets. Note 'None'
# in place of application token, and no username or password:
client = Socrata("data.cityofchicago.org", None)
# Example authenticated client (needed for non-public datasets):
# client = Socrata(data.cityofchicago.org,
# MyAppToken,
# username="<EMAIL>",
# password="<PASSWORD>")
# First 2000 results, returned as JSON from API / converted to Python list of
# dictionaries by sodapy.
results = client.get("ijzp-q8t2", order="date DESC", limit=70000)
# Convert to pandas DataFrame
results_df = pd.DataFrame.from_records(results)
results_df = results_df[results_df.primary_type.isin(
results_df.primary_type.value_counts()[:6].index)]
test_df = results_df
xbound = (-87.9361, -87.5245)
ybound = (41.6447, 42.023)
test_df = test_df[test_df.latitude.notna()].sort_values([
'date'], ascending=[0])
test_df['date'] = pd.to_datetime(test_df['date'])
test_df['updated_on'] = pd.to_datetime(test_df['updated_on'])
#!/usr/bin/env python
#----------------------------------------------------------------------#
'''
A module to analyze token trends on the BSC blockchain.
This is very much a work in progress.
'''
#----------------------------------------------------------------------#
# System Module Imports
import os
import sys
import datetime
import configparser
# Additional Module Imports
import tqdm
import pandas as pd
import requests
# Local Imports
#----------------------------------------------------------------------#
# Read in my API keys from a config file
config = configparser.ConfigParser()
config.read(os.path.join(os.getenv('HOME'), '.config', 'api_keys.ini'))
#----------------------------------------------------------------------#
# BITQUERY API
#----------------------------------------------------------------------#
url_bitquery = 'https://graphql.bitquery.io'
#----------------------------------------------------------------------#
def run_query(query): # A simple function to use requests.post to make the API call.
headers = {'X-API-KEY': config['bitquery']['key']}
request = requests.post(url_bitquery,
json={'query': query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed and return code is {}. {}'.format(request.status_code, query))
#----------------------------------------------------------------------#
def q_pancake_recent_daily(start):
return '''{
ethereum(network: bsc) {
dexTrades(
options: {limit: 10000, desc: "trades"}
date: {since: "%s"}
exchangeName: {in: ["Pancake", "Pancake v2"]}
quoteCurrency: {is: "0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c"}
) {
timeInterval {
day(count: 1)
}
baseCurrency {
symbol
address
}
baseAmount
quoteCurrency {
symbol
address
}
quoteAmount
trades: count
quotePrice
open_price: minimum(of: block, get: quote_price)
high_price: quotePrice(calculate: maximum)
low_price: quotePrice(calculate: minimum)
close_price: maximum(of: block, get: quote_price)
}
}
}
''' % (start,)
#----------------------------------------------------------------------#
def q_ohlc_periods(
address,
start,
period= 'minute',
periods_per_candle= 1,
limit_candles= None,
quote_address= '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c'):
'Construct a query to obtain OHLC data for a given address.'
# Apply the limit if one was given
limit = (limit_candles is not None) and f'options: {{limit: {limit_candles}, asc: "timeInterval.{period}"}}' or ''
# Now construct and return the query
return '''{
ethereum(network: bsc) {
dexTrades(%s
date: {since: "%s"}
exchangeName: {in: ["Pancake", "Pancake v2"]}
baseCurrency: {is: "%s"}
quoteCurrency: {is: "%s"}
) {
timeInterval {
%s(count: %s)
}
baseCurrency {
symbol
address
}
trades: count
open_price: minimum(of: block, get: quote_price)
high_price: quotePrice(calculate: maximum)
low_price: quotePrice(calculate: minimum)
close_price: maximum(of: block, get: quote_price)
}
}
}
''' % (limit, start, address, quote_address, period, periods_per_candle)
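#----------------------------------------------------------------------#
# Illustrative sketch (added for clarity, not part of the original script):
# builds a 5-minute-candle OHLC query string for a placeholder token address;
# no API call is made here, so it runs without a Bitquery key.
def _example_ohlc_query_sketch():
    'Toy demo of q_ohlc_periods; the token address below is a placeholder.'
    placeholder_token = '0x0000000000000000000000000000000000000000'
    query = q_ohlc_periods(placeholder_token, '2022-02-10',
                           period='minute', periods_per_candle=5,
                           limit_candles=288)
    return query  # pass the string to run_query() to execute it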
#----------------------------------------------------------------------#
def q_tokens_created(start_time, end_time):
return '''{
ethereum(network: bsc) {
smartContractCalls(
options: {asc: "block.height", limit: 2147483647}
smartContractMethod: {is: "Contract Creation"}
smartContractType: {is: Token}
time: {after: "%s", before: "%s"}
) {
transaction {
hash
}
block {
height
timestamp {
iso8601
}
}
smartContract {
contractType
address {
address
annotation
}
currency {
name
symbol
decimals
tokenType
}
}
caller {
address
}
}
}
}
''' % (start_time, end_time)
#----------------------------------------------------------------------#
def get_recent_tokens(from_days_ago= 5, to_days_ago= 4):
'Find all tokens registered within a given time period.'
# Construct the query
now = datetime.datetime.now()
start = now - datetime.timedelta(days=from_days_ago)
end = now - datetime.timedelta(days= to_days_ago)
query = q_tokens_created(start.isoformat(), end.isoformat())
# Now run the query
result = run_query(query)
# Basic error handling
if 'errors' in result:
raise RuntimeError(f'ERROR: New tokens query failed with {result["errors"]}')
# Collect info on each new token
new_tokens = [
{
'created' : datetime.datetime.fromisoformat(record['block']['timestamp']['iso8601'].rstrip('Z')),
'owner' : record['caller']['address'],
'address' : record['smartContract']['address']['address'],
'decimals' : record['smartContract']['currency']['decimals'],
'name' : record['smartContract']['currency']['name'],
'symbol' : record['smartContract']['currency']['symbol'],
'tokenType' : record['smartContract']['currency']['tokenType'],
}
for record in result['data']['ethereum']['smartContractCalls']
]
return new_tokens
#----------------------------------------------------------------------#
def float_nan(value):
if value is None:
return float('nan')
return float(value)
#----------------------------------------------------------------------#
def get_ohlc(address, start_time, period= 'minute', periods_per_candle= 1, limit_candles= 24*60, quote_address= '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c'):
'Obtain OHLC data on an address.'
# Construct and run a query to get OHLC data
query = q_ohlc_periods(address, start_time, period, periods_per_candle, limit_candles, quote_address)
result = run_query(query)
# Basic error handling
if 'errors' in result:
raise RuntimeError(f'ERROR: OHLC query ({address}, {start_time}, {period}, {periods_per_candle}, {limit_candles}) failed with {result["errors"]}')
trades = result['data']['ethereum']['dexTrades']
times = [pd.Timestamp(trade['timeInterval']['minute']) for trade in trades]
ohlc = [
(
float(trade['open_price']),
(trade['high_price'] is None) and max(float(trade['open_price']),float(trade['close_price'])) or float(trade['high_price']),
(trade['low_price'] is None) and min(float(trade['open_price']),float(trade['close_price'])) or float(trade['low_price' ]),
float(trade['close_price']),
int(trade['trades']),
)
for trade in trades
]
ohlc_df = pd.DataFrame(ohlc, columns= ['open', 'high', 'low', 'close', 'trades'], index= times)
return ohlc_df
#----------------------------------------------------------------------#
class OHLCData(dict):
'''
A class that obtains OHLC data from whatever source, can save/load as JSON, and can update on demand.
It calculates a list of statistical indicators. Supported indicator types are:
ema : exponential moving average - alpha is controlled by number of periods in window
crossover : abs=# periods since the last time val-a went up over val-b, sign=current comparison
'''
start_date = None
data = None
token_address = '<KEY>'
quote_address = '0xe9e7cea3dedca5984780bafc599bd69add087d56'
def __init__(self, token_address= None, quote_address= None, start_date= '2022-02-10', today= False):
self.today = today and 1 or 0
if token_address is not None:
self.token_address = token_address
if quote_address is not None:
self.quote_address = quote_address
self.start_date = start_date
self.otherdata = {}
self.load()
self.retrieve()
return
def __len__(self):
return self.data is not None and len(self.data) or 0
def __contains__(self, key):
if self.data is None:
return False
try:
return len(self.data.loc[key]) > 0
except:
pass
return False
def __getitem__(self, key):
if self.data is None:
raise IndexError('Empty data')
if isinstance(key, slice):
try:
return self.data[key]
except:
pass
try:
return self.data.loc[key]
except:
pass
raise IndexError(f'Unable to process slice [{key}]')
if key in self.data:
return self.data[key]
if key in self.data.index:
return self.data.loc[key]
raise IndexError(f'Unable to process query [{key}]')
def __repr__(self):
return f'OHLCData({repr(self.data)})'
def __str__(self):
return str(self.data)
def save(self, verbose= True):
'Save OHLC data and stats to a file.'
if self.data is None:
return
try:
self.data.to_pickle(f'ohlc_{self.token_address}_{self.quote_address}.pickle'.lower())
if verbose:
print(f'Saved {int(len(self.data) / 1440)} days of OHLC to storage file')
except Exception as err:
print(f'Unable to save storage file: {err}')
return
def load(self, verbose= True):
'Load OHLC data and stats from a file.'
try:
self.data = pd.read_pickle(f'ohlc_{self.token_address}_{self.quote_address}.pickle'.lower())
if verbose:
print(f'Loaded {int(len(self.data) / 1440)} days of OHLC from storage file')
except Exception as err:
print(f'Unable to load storage file: {err}')
return
def retrieve(self):
'Retrieve any missing data, and calculate stats over all data.'
# Figure out what dates we will loop over
date = datetime.date.fromisoformat(self.start_date)
day = datetime.timedelta(days= 1)
now = datetime.datetime.now()
today = now.date()
n_days = self.today + int((today - date) / day)
n_pulled = 0
n_saved = 0
# Include any existing data we may have
if self.data is not None:
frames = [self.data]
else:
frames = []
print('Retrieving data:')
dates = tqdm.tqdm(range(n_days))
for ii in dates:
# Pull each day worth of OHLC from the server
isodate = date.isoformat()
dates.set_description(f'OHLC data [{isodate}] pulled={n_pulled:4} saved={n_saved:4} ')
if isodate not in self or today == date:
frames.append(get_ohlc(self.token_address, isodate, 'minute', 1, 24*60, self.quote_address))
n_pulled += 1
if n_pulled > 27:
self.data = pd.concat(frames)
self.save(False)
frames = [self.data]
n_saved += n_pulled
n_pulled = 0
date += day
# Save the result
if frames:
self.data = pd.concat(frames)
self.save()
return
#----------------------------------------------------------------------#
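# Illustrative sketch (added for clarity, not part of the original script):
# shows how the 'ema' and 'crossover' indicators described in OHLCData's
# docstring can be computed with pandas; the input argument and column names
# below are assumptions for the example only.
def _example_indicator_sketch(close_prices):
    'close_prices: a pandas Series of close prices indexed by timestamp.'
    fast = close_prices.ewm(span=10, adjust=False).mean()
    slow = close_prices.ewm(span=60, adjust=False).mean()
    # +1 while the fast EMA is above the slow EMA, -1 otherwise
    crossover_sign = (fast > slow).astype(int) * 2 - 1
    return pd.DataFrame({'ema_10': fast, 'ema_60': slow, 'cross_sign': crossover_sign})
#----------------------------------------------------------------------#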
class OHLCStats(object):
def __init__(self, ohlc_data, ema_spans= [10, 20, 60, 120]):
self.spans = ema_spans
self.data = ohlc_data
self.emas = pd.DataFrame(index=self.data.index)
# LIBRARIES
# set up backend for ssh -x11 figures
import matplotlib
matplotlib.use('Agg')
# read and write
import os
import sys
import glob
import re
import fnmatch
import csv
import shutil
from datetime import datetime
# maths
import numpy as np
import pandas as pd
import math
import random
# miscellaneous
import warnings
import gc
import timeit
# sklearn
from sklearn.utils import resample
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, log_loss, roc_auc_score, \
accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, average_precision_score
from sklearn.utils.validation import check_is_fitted
from sklearn.model_selection import KFold, PredefinedSplit, cross_validate
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
# Statistics
from scipy.stats import pearsonr, ttest_rel, norm
# Other tools for ensemble models building (<NAME>'s InnerCV class)
from hyperopt import fmin, tpe, space_eval, Trials, hp, STATUS_OK
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# CPUs
from multiprocessing import Pool
# GPUs
from GPUtil import GPUtil
# tensorflow
import tensorflow as tf
# keras
from keras_preprocessing.image import ImageDataGenerator, Iterator
from keras_preprocessing.image.utils import load_img, img_to_array, array_to_img
from tensorflow.keras.utils import Sequence
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, concatenate
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam, RMSprop, Adadelta
from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, CSVLogger
from tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError, AUC, BinaryAccuracy, Precision, Recall, \
TruePositives, FalsePositives, FalseNegatives, TrueNegatives
from tensorflow_addons.metrics import RSquare, F1Score
# Plots
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image
from bioinfokit import visuz
# Model's attention
from keract import get_activations, get_gradients_of_activations
from scipy.ndimage.interpolation import zoom
# Survival
from lifelines.utils import concordance_index
# Necessary to define MyCSVLogger
import collections
import csv
import io
import six
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.compat import collections_abc
from tensorflow.keras.backend import eval
# Set display parameters
pd.set_option('display.max_rows', 200)
# CLASSES
class Basics:
"""
Root class inherited by most other classes. Includes handy helper functions.
"""
def __init__(self):
# seeds for reproducibility
self.seed = 0
os.environ['PYTHONHASHSEED'] = str(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# other parameters
self.path_data = '../data/'
self.folds = ['train', 'val', 'test']
self.n_CV_outer_folds = 10
self.outer_folds = [str(x) for x in list(range(self.n_CV_outer_folds))]
self.modes = ['', '_sd', '_str']
self.id_vars = ['id', 'eid', 'instance', 'outer_fold']
self.instances = ['0', '1', '1.5', '1.51', '1.52', '1.53', '1.54', '2', '3']
self.ethnicities_vars_forgot_Other = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other_ethnicity',
'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.ethnicities_vars = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other',
'Ethnicity.Other_ethnicity', 'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.demographic_vars = ['Age', 'Sex'] + self.ethnicities_vars
self.names_model_parameters = ['target', 'organ', 'view', 'transformation', 'architecture', 'n_fc_layers',
'n_fc_nodes', 'optimizer', 'learning_rate', 'weight_decay', 'dropout_rate',
'data_augmentation_factor']
self.targets_regression = ['Age']
self.targets_binary = ['Sex']
self.models_types = ['', '_bestmodels']
self.dict_prediction_types = {'Age': 'regression', 'Sex': 'binary'}
self.dict_side_predictors = {'Age': ['Sex'] + self.ethnicities_vars_forgot_Other,
'Sex': ['Age'] + self.ethnicities_vars_forgot_Other}
self.organs = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal']
self.left_right_organs_views = ['Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees']
self.dict_organs_to_views = {'Brain': ['MRI'],
'Eyes': ['Fundus', 'OCT'],
'Arterial': ['Carotids'],
'Heart': ['MRI'],
'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody'],
'PhysicalActivity': ['FullWeek']}
self.dict_organsviews_to_transformations = \
{'Brain_MRI': ['SagittalRaw', 'SagittalReference', 'CoronalRaw', 'CoronalReference', 'TransverseRaw',
'TransverseReference'],
'Arterial_Carotids': ['Mixed', 'LongAxis', 'CIMT120', 'CIMT150', 'ShortAxis'],
'Heart_MRI': ['2chambersRaw', '2chambersContrast', '3chambersRaw', '3chambersContrast', '4chambersRaw',
'4chambersContrast'],
'Musculoskeletal_Spine': ['Sagittal', 'Coronal'],
'Musculoskeletal_FullBody': ['Mixed', 'Figure', 'Skeleton', 'Flesh'],
'PhysicalActivity_FullWeek': ['GramianAngularField1minDifference', 'GramianAngularField1minSummation',
'MarkovTransitionField1min', 'RecurrencePlots1min']}
self.dict_organsviews_to_transformations.update(dict.fromkeys(['Eyes_Fundus', 'Eyes_OCT'], ['Raw']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Abdomen_Liver', 'Abdomen_Pancreas'], ['Raw', 'Contrast']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], ['MRI']))
self.organsviews_not_to_augment = []
self.organs_instances23 = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity']
self.organs_XWAS = \
['*', '*instances01', '*instances1.5x', '*instances23', 'Brain', 'BrainCognitive', 'BrainMRI', 'Eyes',
'EyesFundus', 'EyesOCT', 'Hearing', 'Lungs', 'Arterial', 'ArterialPulseWaveAnalysis', 'ArterialCarotids',
'Heart', 'HeartECG', 'HeartMRI', 'Abdomen', 'AbdomenLiver', 'AbdomenPancreas', 'Musculoskeletal',
'MusculoskeletalSpine', 'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalFullBody',
'MusculoskeletalScalars', 'PhysicalActivity', 'Biochemistry', 'BiochemistryUrine', 'BiochemistryBlood',
'ImmuneSystem']
# Others
if '/Users/Alan/' in os.getcwd():
os.chdir('/Users/Alan/Desktop/Aging/Medical_Images/scripts/')
else:
os.chdir('/n/groups/patel/Alan/Aging/Medical_Images/scripts/')
gc.enable() # garbage collector
warnings.filterwarnings('ignore')
def _version_to_parameters(self, model_name):
parameters = {}
parameters_list = model_name.split('_')
for i, parameter in enumerate(self.names_model_parameters):
parameters[parameter] = parameters_list[i]
if len(parameters_list) > 11:
parameters['outer_fold'] = parameters_list[11]
return parameters
@staticmethod
def _parameters_to_version(parameters):
return '_'.join(parameters.values())
@staticmethod
def convert_string_to_boolean(string):
if string == 'True':
boolean = True
elif string == 'False':
boolean = False
else:
print('ERROR: string must be either \'True\' or \'False\'')
sys.exit(1)
return boolean
class Metrics(Basics):
"""
Helper class defining dictionaries of metrics and custom metrics
"""
def __init__(self):
# Parameters
Basics.__init__(self)
self.metrics_displayed_in_int = ['True-Positives', 'True-Negatives', 'False-Positives', 'False-Negatives']
self.metrics_needing_classpred = ['F1-Score', 'Binary-Accuracy', 'Precision', 'Recall']
self.dict_metrics_names_K = {'regression': ['RMSE'], # For now, R-Square is buggy. Try again in a few months.
'binary': ['ROC-AUC', 'PR-AUC', 'F1-Score', 'Binary-Accuracy', 'Precision',
'Recall', 'True-Positives', 'False-Positives', 'False-Negatives',
'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_metrics_names = {'regression': ['RMSE', 'MAE', 'R-Squared', 'Pearson-Correlation'],
'binary': ['ROC-AUC', 'F1-Score', 'PR-AUC', 'Binary-Accuracy', 'Sensitivity',
'Specificity', 'Precision', 'Recall', 'True-Positives', 'False-Positives',
'False-Negatives', 'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_losses_names = {'regression': 'MSE', 'binary': 'Binary-Crossentropy',
'multiclass': 'categorical_crossentropy'}
self.dict_main_metrics_names_K = {'Age': 'MAE', 'Sex': 'PR-AUC', 'imbalanced_binary_placeholder': 'PR-AUC'}
self.dict_main_metrics_names = {'Age': 'R-Squared', 'Sex': 'ROC-AUC',
'imbalanced_binary_placeholder': 'PR-AUC'}
self.main_metrics_modes = {'loss': 'min', 'R-Squared': 'max', 'Pearson-Correlation': 'max', 'RMSE': 'min',
'MAE': 'min', 'ROC-AUC': 'max', 'PR-AUC': 'max', 'F1-Score': 'max', 'C-Index': 'max',
'C-Index-difference': 'max'}
self.n_bootstrap_iterations = 1000
def rmse(y_true, y_pred):
return math.sqrt(mean_squared_error(y_true, y_pred))
def sensitivity_score(y, pred):
_, _, fn, tp = confusion_matrix(y, pred.round()).ravel()
return tp / (tp + fn)
def specificity_score(y, pred):
tn, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn / (tn + fp)
def true_positives_score(y, pred):
_, _, _, tp = confusion_matrix(y, pred.round()).ravel()
return tp
def false_positives_score(y, pred):
_, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return fp
def false_negatives_score(y, pred):
_, _, fn, _ = confusion_matrix(y, pred.round()).ravel()
return fn
def true_negatives_score(y, pred):
tn, _, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn
self.dict_metrics_sklearn = {'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'RMSE': rmse,
'Pearson-Correlation': pearsonr,
'R-Squared': r2_score,
'Binary-Crossentropy': log_loss,
'ROC-AUC': roc_auc_score,
'F1-Score': f1_score,
'PR-AUC': average_precision_score,
'Binary-Accuracy': accuracy_score,
'Sensitivity': sensitivity_score,
'Specificity': specificity_score,
'Precision': precision_score,
'Recall': recall_score,
'True-Positives': true_positives_score,
'False-Positives': false_positives_score,
'False-Negatives': false_negatives_score,
'True-Negatives': true_negatives_score}
def _bootstrap(self, data, function):
results = []
for i in range(self.n_bootstrap_iterations):
data_i = resample(data, replace=True, n_samples=len(data.index))
results.append(function(data_i['y'], data_i['pred']))
return np.mean(results), np.std(results)
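# Illustrative sketch (added for clarity, not part of the original pipeline):
# a standalone version of the resampling idea behind Metrics._bootstrap,
# estimating the mean and standard deviation of R-Squared on a toy frame.
# The data and the default number of iterations are made up for the example.
def _example_bootstrap_sketch(n_iterations=1000):
    'Toy demo of bootstrapping a metric over (y, pred) pairs.'
    toy = pd.DataFrame({'y': [1., 2., 3., 4., 5.], 'pred': [1.1, 1.9, 3.2, 3.8, 5.1]})
    scores = []
    for _ in range(n_iterations):
        sample = resample(toy, replace=True, n_samples=len(toy.index))
        scores.append(r2_score(sample['y'], sample['pred']))
    return np.mean(scores), np.std(scores)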
class PreprocessingMain(Basics):
"""
This class executes the code for step 01. It preprocesses the main dataframe by:
- reformatting the rows and columns
- splitting the dataset into folds for the future cross-validations
- imputing key missing data
- adding a new UKB instance for physical activity data
- formatting the demographics columns (age, sex and ethnicity)
- reformatting the dataframe so that different instances of the same participant are treated as different rows
- saving the dataframe
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
def _add_outer_folds(self):
outer_folds_split = pd.read_csv(self.path_data + 'All_eids.csv')
outer_folds_split.rename(columns={'fold': 'outer_fold'}, inplace=True)
outer_folds_split['eid'] = outer_folds_split['eid'].astype('str')
outer_folds_split['outer_fold'] = outer_folds_split['outer_fold'].astype('str')
outer_folds_split.set_index('eid', inplace=True)
self.data_raw = self.data_raw.join(outer_folds_split)
def _impute_missing_ecg_instances(self):
data_ecgs = pd.read_csv('/n/groups/patel/Alan/Aging/TimeSeries/scripts/age_analysis/missing_samples.csv')
data_ecgs['eid'] = data_ecgs['eid'].astype(str)
data_ecgs['instance'] = data_ecgs['instance'].astype(str)
for _, row in data_ecgs.iterrows():
self.data_raw.loc[row['eid'], 'Date_attended_center_' + row['instance']] = row['observation_date']
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_sex(self):
# Use genetic sex when available
self.data_raw['Sex_genetic'] = self.data_raw['Sex_genetic'].fillna(self.data_raw['Sex'])
self.data_raw.drop(['Sex'], axis=1, inplace=True)
self.data_raw.rename(columns={'Sex_genetic': 'Sex'}, inplace=True)
self.data_raw.dropna(subset=['Sex'], inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = \
self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _encode_ethnicity(self):
# Fill NAs for ethnicity on instance 0 if available in other instances
eids_missing_ethnicity = self.data_raw['eid'][self.data_raw['Ethnicity'].isna()]
for eid in eids_missing_ethnicity:
sample = self.data_raw.loc[eid, :]
if not math.isnan(sample['Ethnicity_1']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_1']
elif not math.isnan(sample['Ethnicity_2']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_2']
self.data_raw.drop(['Ethnicity_1', 'Ethnicity_2'], axis=1, inplace=True)
# One hot encode ethnicity
dict_ethnicity_codes = {'1': 'Ethnicity.White', '1001': 'Ethnicity.British', '1002': 'Ethnicity.Irish',
'1003': 'Ethnicity.White_Other',
'2': 'Ethnicity.Mixed', '2001': 'Ethnicity.White_and_Black_Caribbean',
'2002': 'Ethnicity.White_and_Black_African',
'2003': 'Ethnicity.White_and_Asian', '2004': 'Ethnicity.Mixed_Other',
'3': 'Ethnicity.Asian', '3001': 'Ethnicity.Indian', '3002': 'Ethnicity.Pakistani',
'3003': 'Ethnicity.Bangladeshi', '3004': 'Ethnicity.Asian_Other',
'4': 'Ethnicity.Black', '4001': 'Ethnicity.Caribbean', '4002': 'Ethnicity.African',
'4003': 'Ethnicity.Black_Other',
'5': 'Ethnicity.Chinese',
'6': 'Ethnicity.Other_ethnicity',
'-1': 'Ethnicity.Do_not_know',
'-3': 'Ethnicity.Prefer_not_to_answer',
'-5': 'Ethnicity.NA'}
self.data_raw['Ethnicity'] = self.data_raw['Ethnicity'].fillna(-5).astype(int).astype(str)
ethnicities = pd.get_dummies(self.data_raw['Ethnicity'])
self.data_raw.drop(['Ethnicity'], axis=1, inplace=True)
ethnicities.rename(columns=dict_ethnicity_codes, inplace=True)
ethnicities['Ethnicity.White'] = ethnicities['Ethnicity.White'] + ethnicities['Ethnicity.British'] + \
ethnicities['Ethnicity.Irish'] + ethnicities['Ethnicity.White_Other']
ethnicities['Ethnicity.Mixed'] = ethnicities['Ethnicity.Mixed'] + \
ethnicities['Ethnicity.White_and_Black_Caribbean'] + \
ethnicities['Ethnicity.White_and_Black_African'] + \
ethnicities['Ethnicity.White_and_Asian'] + \
ethnicities['Ethnicity.Mixed_Other']
ethnicities['Ethnicity.Asian'] = ethnicities['Ethnicity.Asian'] + ethnicities['Ethnicity.Indian'] + \
ethnicities['Ethnicity.Pakistani'] + ethnicities['Ethnicity.Bangladeshi'] + \
ethnicities['Ethnicity.Asian_Other']
ethnicities['Ethnicity.Black'] = ethnicities['Ethnicity.Black'] + ethnicities['Ethnicity.Caribbean'] + \
ethnicities['Ethnicity.African'] + ethnicities['Ethnicity.Black_Other']
ethnicities['Ethnicity.Other'] = ethnicities['Ethnicity.Other_ethnicity'] + \
ethnicities['Ethnicity.Do_not_know'] + \
ethnicities['Ethnicity.Prefer_not_to_answer'] + \
ethnicities['Ethnicity.NA']
self.data_raw = self.data_raw.join(ethnicities)
def generate_data(self):
# Preprocessing
dict_UKB_fields_to_names = {'34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3',
'31-0.0': 'Sex', '22001-0.0': 'Sex_genetic', '21000-0.0': 'Ethnicity',
'21000-1.0': 'Ethnicity_1', '21000-2.0': 'Ethnicity_2',
'22414-2.0': 'Abdominal_images_quality'}
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv',
usecols=['eid', '31-0.0', '22001-0.0', '21000-0.0', '21000-1.0', '21000-2.0',
'34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0', '22414-2.0'])
# Formatting
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
self._add_outer_folds()
self._impute_missing_ecg_instances()
self._add_physicalactivity_instances()
self._compute_sex()
self._compute_age()
self._encode_ethnicity()
# Concatenate the data from the different instances
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw[['eid', 'outer_fold', 'Age_' + i, 'Sex'] + self.ethnicities_vars +
['Abdominal_images_quality']].dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
df_i.rename(columns={'Age_' + i: 'Age'}, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[self.id_vars + self.demographic_vars + ['Abdominal_images_quality']]
if i != '2':
df_i['Abdominal_images_quality'] = np.nan # not defined for instance 3, not relevant for instances 0, 1
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Save age as a float32 instead of float64
self.data_features['Age'] = np.float32(self.data_features['Age'])
# Shuffle the rows before saving the dataframe
self.data_features = self.data_features.sample(frac=1)
# Generate dataframe for eids pipeline as opposed to instances pipeline
self.data_features_eids = self.data_features[self.data_features.instance == '0'].copy()
self.data_features_eids['instance'] = '*'
self.data_features_eids['id'] = [ID.replace('_0', '_*') for ID in self.data_features_eids['id'].values]
def save_data(self):
self.data_features.to_csv(self.path_data + 'data-features_instances.csv', index=False)
self.data_features_eids.to_csv(self.path_data + 'data-features_eids.csv', index=False)
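# A standalone sketch of the age convention used in PreprocessingMain._compute_age: the day of birth is
# not available, so the 15th of the birth month is used as an approximation, and age is expressed in
# years as days / 365.25. The dates below are made up for illustration; this helper is not part of the
# original pipeline.
def _age_convention_sketch():
    from datetime import datetime
    date_of_birth = datetime(1955, 7, 15)  # year and month known, day approximated to the 15th
    date_attended_center = datetime(2010, 3, 2)  # hypothetical visit date
    return (date_attended_center - date_of_birth).days / 365.25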
class PreprocessingImagesIDs(Basics):
"""
Splits the different images datasets into folds for the future cross validation
"""
def __init__(self):
Basics.__init__(self)
# Instances 2 and 3 datasets (which contain most of the medical images)
self.instances23_eids = None
self.HEART_EIDs = None
self.heart_eids = None
self.FOLDS_23_EIDS = None
def _load_23_eids(self):
data_features = pd.read_csv(self.path_data + 'data-features_instances.csv')
images_eids = data_features['eid'][data_features['instance'].isin([2, 3])]
self.images_eids = list(set(images_eids))
def _load_heart_eids(self):
# IDs already used in Heart videos
HEART_EIDS = {}
heart_eids = []
for i in range(10):
# Important: the i-th data fold is used as the *validation* fold for outer fold i.
data_i = pd.read_csv(
"/n/groups/patel/JbProst/Heart/Data/FoldsAugmented/data-features_Heart_20208_Augmented_Age_val_" + str(
i) + ".csv")
HEART_EIDS[i] = list(set([int(str(ID)[:7]) for ID in data_i['eid']]))
heart_eids = heart_eids + HEART_EIDS[i]
self.HEART_EIDS = HEART_EIDS
self.heart_eids = heart_eids
def _split_23_eids_folds(self):
self._load_23_eids()
self._load_heart_eids()
# List extra images ids, and split them between the different folds.
extra_eids = [eid for eid in self.images_eids if eid not in self.heart_eids]
random.shuffle(extra_eids)
n_samples = len(extra_eids)
n_samples_by_fold = n_samples / self.n_CV_outer_folds
FOLDS_EXTRAEIDS = {}
FOLDS_EIDS = {}
for outer_fold in self.outer_folds:
FOLDS_EXTRAEIDS[outer_fold] = \
extra_eids[int((int(outer_fold)) * n_samples_by_fold):int((int(outer_fold) + 1) * n_samples_by_fold)]
FOLDS_EIDS[outer_fold] = self.HEART_EIDS[int(outer_fold)] + FOLDS_EXTRAEIDS[outer_fold]
self.FOLDS_23_EIDS = FOLDS_EIDS
def _save_23_eids_folds(self):
for outer_fold in self.outer_folds:
with open(self.path_data + 'instances23_eids_' + outer_fold + '.csv', 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(self.FOLDS_23_EIDS[outer_fold])
def generate_eids_splits(self):
print("Generating eids split for organs on instances 2 and 3")
self._split_23_eids_folds()
self._save_23_eids_folds()
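# A toy sketch of the slicing performed by PreprocessingImagesIDs._split_23_eids_folds: after shuffling,
# the "extra" eids are cut into n_folds contiguous chunks of (possibly fractional) length n / n_folds,
# with int() applied to the chunk boundaries so every eid lands in exactly one fold. The eids below are
# made up for illustration; this helper is not part of the original pipeline.
def _eid_fold_slicing_sketch(n_folds=10):
    import random
    extra_eids = list(range(1000000, 1000103))  # hypothetical eids
    random.shuffle(extra_eids)
    n_samples_by_fold = len(extra_eids) / n_folds
    folds = {}
    for outer_fold in range(n_folds):
        folds[str(outer_fold)] = \
            extra_eids[int(outer_fold * n_samples_by_fold):int((outer_fold + 1) * n_samples_by_fold)]
    return folds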
class PreprocessingFolds(Metrics):
"""
Splits the data into training, validation and testing sets for all CV folds
"""
def __init__(self, target, organ, regenerate_data):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.list_ids_per_view_transformation = None
# Check if these folds have already been generated
if not regenerate_data:
if len(glob.glob(self.path_data + 'data-features_' + organ + '_*_' + target + '_*.csv')) > 0:
print("Error: The files already exist! Either change regenerate_data to True or delete the previous"
" version.")
sys.exit(1)
self.side_predictors = self.dict_side_predictors[target]
self.variables_to_normalize = self.side_predictors
if target in self.targets_regression:
self.variables_to_normalize.append(target)
self.dict_image_quality_col = {'Liver': 'Abdominal_images_quality'}
self.dict_image_quality_col.update(
dict.fromkeys(['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal', 'PhysicalActivity'],
None))
self.image_quality_col = self.dict_image_quality_col[organ]
self.views = self.dict_organs_to_views[organ]
self.list_ids = None
self.list_ids_per_view = {}
self.data = None
self.EIDS = None
self.EIDS_per_view = {'train': {}, 'val': {}, 'test': {}}
self.data_fold = None
def _get_list_ids(self):
self.list_ids_per_view_transformation = {}
list_ids = []
# if different views are available, take the union of the ids
for view in self.views:
self.list_ids_per_view_transformation[view] = {}
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
list_ids_transformation = []
path = '../images/' + self.organ + '/' + view + '/' + transformation + '/'
# for paired organs, take the unions of the ids available on the right and the left sides
if self.organ + '_' + view in self.left_right_organs_views:
for side in ['right', 'left']:
list_ids_transformation += os.listdir(path + side + '/')
list_ids_transformation = np.unique(list_ids_transformation).tolist()
else:
list_ids_transformation += os.listdir(path)
self.list_ids_per_view_transformation[view][transformation] = \
[im.replace('.jpg', '') for im in list_ids_transformation]
list_ids += self.list_ids_per_view_transformation[view][transformation]
self.list_ids = np.unique(list_ids).tolist()
self.list_ids.sort()
def _filter_and_format_data(self):
"""
Clean the data before it can be split between the rows
"""
cols_data = self.id_vars + self.demographic_vars
if self.image_quality_col is not None:
cols_data.append(self.dict_image_quality_col[self.organ])
data = pd.read_csv(self.path_data + 'data-features_instances.csv', usecols=cols_data)
data.rename(columns={self.dict_image_quality_col[self.organ]: 'Data_quality'}, inplace=True)
for col_name in self.id_vars:
data[col_name] = data[col_name].astype(str)
data.set_index('id', drop=False, inplace=True)
if self.image_quality_col is not None:
data = data[~data['Data_quality'].isna()]  # keep only samples with an image quality rating
data.drop('Data_quality', axis=1, inplace=True)
# get rid of samples with NAs
data.dropna(inplace=True)
# list the samples' ids for which images are available
data = data.loc[self.list_ids]
self.data = data
def _split_data(self):
# Generate the data for each outer_fold
for i, outer_fold in enumerate(self.outer_folds):
of_val = outer_fold
of_test = str((int(outer_fold) + 1) % len(self.outer_folds))
DATA = {
'train': self.data[~self.data['outer_fold'].isin([of_val, of_test])],
'val': self.data[self.data['outer_fold'] == of_val],
'test': self.data[self.data['outer_fold'] == of_test]
}
# Generate the data for the different views and transformations
for view in self.views:
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
print('Splitting data for view ' + view + ', and transformation ' + transformation)
DF = {}
for fold in self.folds:
idx = DATA[fold]['id'].isin(self.list_ids_per_view_transformation[view][transformation]).values
DF[fold] = DATA[fold].iloc[idx, :]
# compute values for scaling of variables
normalizing_values = {}
for var in self.variables_to_normalize:
var_mean = DF['train'][var].mean()
if len(DF['train'][var].unique()) < 2:
print('Variable ' + var + ' has a single value in fold ' + outer_fold +
'. Using 1 as std for normalization.')
var_std = 1
else:
var_std = DF['train'][var].std()
normalizing_values[var] = {'mean': var_mean, 'std': var_std}
# normalize the variables
for fold in self.folds:
for var in self.variables_to_normalize:
DF[fold][var + '_raw'] = DF[fold][var]
DF[fold][var] = (DF[fold][var] - normalizing_values[var]['mean']) \
/ normalizing_values[var]['std']
# report issue if NAs were detected (most likely comes from a sample whose id did not match)
n_mismatching_samples = DF[fold].isna().sum().max()
if n_mismatching_samples > 0:
print(DF[fold][DF[fold].isna().any(axis=1)])
print('/!\\ WARNING! ' + str(n_mismatching_samples) + ' ' + fold + ' images ids out of ' +
str(len(DF[fold].index)) + ' did not match the dataframe!')
# save the data
DF[fold].to_csv(self.path_data + 'data-features_' + self.organ + '_' + view + '_' +
transformation + '_' + self.target + '_' + fold + '_' + outer_fold + '.csv',
index=False)
print('For outer_fold ' + outer_fold + ', the ' + fold + ' fold has a sample size of ' +
str(len(DF[fold].index)))
def generate_folds(self):
self._get_list_ids()
self._filter_and_format_data()
self._split_data()
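# A minimal sketch of the scaling used in PreprocessingFolds._split_data: the mean and std are computed
# on the training fold only (std falls back to 1 when the variable is constant), the same transformation
# is applied to every fold, and a *_raw copy of the variable is kept. The toy dataframes below are
# assumptions for illustration; this helper is not part of the original pipeline.
def _fold_normalization_sketch():
    import pandas as pd
    DF = {'train': pd.DataFrame({'Age': [50.0, 60.0, 70.0]}),
          'val': pd.DataFrame({'Age': [55.0, 65.0]})}
    var = 'Age'
    var_mean = DF['train'][var].mean()
    var_std = DF['train'][var].std() if DF['train'][var].nunique() > 1 else 1
    for fold in DF:
        DF[fold][var + '_raw'] = DF[fold][var]
        DF[fold][var] = (DF[fold][var] - var_mean) / var_std
    return DF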
class PreprocessingSurvival(Basics):
"""
Preprocesses the main dataframe for survival purposes.
Mirrors the PreprocessingMain class, but computes the Death indicator and FollowUpTime for the future survival analysis
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
self.survival_vars = ['FollowUpTime', 'Death']
def _preprocessing(self):
usecols = ['eid', '40000-0.0', '34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0']
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv', usecols=usecols)
dict_UKB_fields_to_names = {'40000-0.0': 'FollowUpDate', '34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3'}
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
# Format survival data
self.data_raw['Death'] = ~self.data_raw['FollowUpDate'].isna()
self.data_raw['FollowUpDate'] = self.data_raw['FollowUpDate'].fillna('2020-04-27')
self.data_raw['FollowUpDate'] = self.data_raw['FollowUpDate'].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
assert ('FollowUpDate.1' not in self.data_raw.columns)
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw.dropna(subset=['Year_of_birth'], inplace=True)
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpDate'] - self.data_raw[
'Date_attended_center_' + i]
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpTime_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth', 'FollowUpDate'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _concatenate_instances(self):
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw.dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
dict_names = {}
features = ['Age', 'FollowUpTime']
for feature in features:
dict_names[feature + '_' + i] = feature
self.dict_names = dict_names
df_i.rename(columns=dict_names, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[['id', 'eid', 'instance'] + self.survival_vars]
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Add * instance for eids
survival_eids = self.data_features[self.data_features['instance'] == '0'].copy()
survival_eids['instance'] = '*'
survival_eids['id'] = survival_eids['eid'] + '_' + survival_eids['instance']
self.data_features = self.data_features.append(survival_eids)
def generate_data(self):
# Formatting
self._preprocessing()
self._add_physicalactivity_instances()
self._compute_age()
self._concatenate_instances()
# save data
self.data_features.to_csv('../data/data_survival.csv', index=False)
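# A standalone sketch of the survival variables built by PreprocessingSurvival: Death is True when a
# death date (field 40000) is recorded, and FollowUpTime is the time in years between the center visit
# and either the death date or the censoring date used above (2020-04-27). The dates below are made up
# for illustration; this helper is not part of the original pipeline.
def _survival_variables_sketch():
    from datetime import datetime
    date_attended_center = datetime(2012, 5, 1)  # hypothetical visit date
    death_date = None  # hypothetical participant, alive at censoring
    censoring_date = datetime(2020, 4, 27)
    death = death_date is not None
    follow_up_end = death_date if death else censoring_date
    follow_up_time_years = (follow_up_end - date_attended_center).days / 365.25
    return death, follow_up_time_years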
class MyImageDataGenerator(Basics, Sequence, ImageDataGenerator):
"""
Helper class: custom data generator for images.
It handles several custom features such as:
- provides batches of not only images, but also the scalar data (e.g. demographics) that corresponds to them
- performs random shuffling while making sure that no leftover data (the remainder of the modulo batch size)
is left unused
- handles paired data for paired organs (e.g. left/right eyes)
"""
def __init__(self, target=None, organ=None, view=None, data_features=None, n_samples_per_subepoch=None,
batch_size=None, training_mode=None, side_predictors=None, dir_images=None, images_width=None,
images_height=None, data_augmentation=False, data_augmentation_factor=None, seed=None):
# Parameters
Basics.__init__(self)
self.target = target
if target in self.targets_regression:
self.labels = data_features[target]
else:
self.labels = data_features[target + '_raw']
self.organ = organ
self.view = view
self.training_mode = training_mode
self.data_features = data_features
self.list_ids = data_features.index.values
self.batch_size = batch_size
# for paired organs, use half as many ids per batch (two images per id), and add organ_side as a side predictor
if organ + '_' + view in self.left_right_organs_views:
self.data_features['organ_side'] = np.nan
self.n_ids_batch = batch_size // 2
else:
self.n_ids_batch = batch_size
if self.training_mode & (n_samples_per_subepoch is not None): # during training, 1 epoch = number of samples
self.steps = math.ceil(n_samples_per_subepoch / batch_size)
else: # during prediction and other tasks, an epoch is defined as all the samples being seen once and only once
self.steps = math.ceil(len(self.list_ids) / self.n_ids_batch)
# learning_rate_patience
if n_samples_per_subepoch is not None:
self.n_subepochs_per_epoch = math.ceil(len(self.data_features.index) / n_samples_per_subepoch)
# initiate the indices and shuffle the ids
self.shuffle = training_mode # Only shuffle if the model is being trained. Otherwise no need.
self.indices = np.arange(len(self.list_ids))
self.idx_end = 0 # Keep track of last indice to permute indices accordingly at the end of epoch.
if self.shuffle:
np.random.shuffle(self.indices)
# Input for side NN and CNN
self.side_predictors = side_predictors
self.dir_images = dir_images
self.images_width = images_width
self.images_height = images_height
# Data augmentation
self.data_augmentation = data_augmentation
self.data_augmentation_factor = data_augmentation_factor
self.seed = seed
# Parameters for data augmentation: (rotation range, width shift range, height shift range, zoom range)
self.augmentation_parameters = \
pd.DataFrame(index=['Brain_MRI', 'Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Heart_MRI',
'Abdomen_Liver', 'Abdomen_Pancreas', 'Musculoskeletal_Spine', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees', 'Musculoskeletal_FullBody', 'PhysicalActivity_FullWeek',
'PhysicalActivity_Walking'],
columns=['rotation', 'width_shift', 'height_shift', 'zoom'])
self.augmentation_parameters.loc['Brain_MRI', :] = [10, 0.05, 0.1, 0.0]
self.augmentation_parameters.loc['Eyes_Fundus', :] = [20, 0.02, 0.02, 0]
self.augmentation_parameters.loc['Eyes_OCT', :] = [30, 0.1, 0.2, 0]
self.augmentation_parameters.loc[['Arterial_Carotids'], :] = [0, 0.2, 0.0, 0.0]
self.augmentation_parameters.loc[['Heart_MRI', 'Abdomen_Liver', 'Abdomen_Pancreas',
'Musculoskeletal_Spine'], :] = [10, 0.1, 0.1, 0.0]
self.augmentation_parameters.loc[['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], :] = [10, 0.1, 0.1, 0.1]
self.augmentation_parameters.loc[['Musculoskeletal_FullBody'], :] = [10, 0.05, 0.02, 0.0]
self.augmentation_parameters.loc[['PhysicalActivity_FullWeek'], :] = [0, 0, 0, 0.0]
organ_view = organ + '_' + view
ImageDataGenerator.__init__(self, rescale=1. / 255.,
rotation_range=self.augmentation_parameters.loc[organ_view, 'rotation'],
width_shift_range=self.augmentation_parameters.loc[organ_view, 'width_shift'],
height_shift_range=self.augmentation_parameters.loc[organ_view, 'height_shift'],
zoom_range=self.augmentation_parameters.loc[organ_view, 'zoom'])
def __len__(self):
return self.steps
def on_epoch_end(self):
_ = gc.collect()
self.indices = np.concatenate([self.indices[self.idx_end:], self.indices[:self.idx_end]])
def _generate_image(self, path_image):
img = load_img(path_image, target_size=(self.images_width, self.images_height), color_mode='rgb')
Xi = img_to_array(img)
if hasattr(img, 'close'):
img.close()
if self.data_augmentation:
params = self.get_random_transform(Xi.shape)
Xi = self.apply_transform(Xi, params)
Xi = self.standardize(Xi)
return Xi
def _data_generation(self, list_ids_batch):
# initialize empty matrices
n_samples_batch = min(len(list_ids_batch), self.batch_size)
X = np.empty((n_samples_batch, self.images_width, self.images_height, 3)) * np.nan
x = np.empty((n_samples_batch, len(self.side_predictors))) * np.nan
y = np.empty((n_samples_batch, 1)) * np.nan
# fill the matrices sample by sample
for i, ID in enumerate(list_ids_batch):
y[i] = self.labels[ID]
x[i] = self.data_features.loc[ID, self.side_predictors]
if self.organ + '_' + self.view in self.left_right_organs_views:
if i % 2 == 0:
path = self.dir_images + 'right/'
x[i][-1] = 0
else:
path = self.dir_images + 'left/'
x[i][-1] = 1
if not os.path.exists(path + ID + '.jpg'):
path = path.replace('/right/', '/left/') if i % 2 == 0 else path.replace('/left/', '/right/')
x[i][-1] = 1 - x[i][-1]
else:
path = self.dir_images
X[i, :, :, :] = self._generate_image(path_image=path + ID + '.jpg')
return [X, x], y
def __getitem__(self, index):
# Select the indices
idx_start = (index * self.n_ids_batch) % len(self.list_ids)
idx_end = (((index + 1) * self.n_ids_batch) - 1) % len(self.list_ids) + 1
if idx_start > idx_end:
# If this happens outside of training, that is a mistake
if not self.training_mode:
print('\nERROR: Outside of training, every sample should only be predicted once!')
sys.exit(1)
# Select part of the indices from the end of the epoch
indices = self.indices[idx_start:]
# Generate a new set of indices
# print('\nThe end of the data was reached within this batch, looping.')
if self.shuffle:
np.random.shuffle(self.indices)
# Complete the batch with samples from the new indices
indices = np.concatenate([indices, self.indices[:idx_end]])
else:
indices = self.indices[idx_start: idx_end]
if (idx_end == len(self.list_ids)) & self.shuffle:
# print('\nThe end of the data was reached. Shuffling for the next epoch.')
np.random.shuffle(self.indices)
# Keep track of last indice for end of subepoch
self.idx_end = idx_end
# Select the corresponding ids
list_ids_batch = [self.list_ids[i] for i in indices]
# For paired organs, two images (left, right eyes) are selected for each id.
if self.organ + '_' + self.view in self.left_right_organs_views:
list_ids_batch = [ID for ID in list_ids_batch for _ in ('right', 'left')]
return self._data_generation(list_ids_batch)
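# A toy sketch of the paired-organ batching used in MyImageDataGenerator: each id is duplicated so that
# even positions load the right-side image (organ_side=0) and odd positions load the left-side image
# (organ_side=1), which is why the number of ids per batch is batch_size // 2. The ids below are made up
# for illustration; this helper is not part of the original pipeline.
def _paired_organ_batch_sketch():
    list_ids_batch = ['1000001_2', '1000002_2']  # hypothetical ids
    expanded = [ID for ID in list_ids_batch for _ in ('right', 'left')]
    sides = [0 if i % 2 == 0 else 1 for i in range(len(expanded))]  # 0=right, 1=left
    return list(zip(expanded, sides))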
class MyCSVLogger(Callback):
"""
Custom CSV Logger callback class for Keras training: appends to an existing file if one is found, which allows
keeping track of training over several jobs.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.csv_file = None
if six.PY2:
self.file_flags = 'b'
self._open_args = {}
else:
self.file_flags = ''
self._open_args = {'newline': '\n'}
Callback.__init__(self)
def on_train_begin(self, logs=None):
if self.append:
if file_io.file_exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = io.open(self.filename, mode + self.file_flags, **self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch', 'learning_rate'] + self.keys
if six.PY2:
fieldnames = [unicode(x) for x in fieldnames]
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({'epoch': epoch, 'learning_rate': tf.keras.backend.eval(self.model.optimizer.lr)})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
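# A minimal sketch of the append logic in MyCSVLogger.on_train_begin: when appending, the header is only
# written if the existing file is empty, so a training run resumed in a later job keeps extending the
# same CSV. The filename is hypothetical and os.path.exists is used here in place of tensorflow's
# file_io for simplicity; this helper is not part of the original pipeline.
def _csv_logger_append_sketch(filename='logger_example.csv', append=True):
    import os
    file_exists = os.path.exists(filename)
    append_header = True
    if append and file_exists:
        with open(filename, 'r') as f:
            append_header = not bool(len(f.readline()))
    mode = 'a' if (append and file_exists) else 'w'
    return mode, append_header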
class MyModelCheckpoint(ModelCheckpoint):
"""
Custom checkpoint callback class for Keras training. Accepts a baseline performance, so that weights are only saved when the monitored metric improves on it.
"""
def __init__(self, filepath, monitor='val_loss', baseline=-np.Inf, verbose=0, save_best_only=False,
save_weights_only=False, mode='auto', save_freq='epoch'):
# Parameters
ModelCheckpoint.__init__(self, filepath, monitor=monitor, verbose=verbose, save_best_only=save_best_only,
save_weights_only=save_weights_only, mode=mode, save_freq=save_freq)
if mode == 'min':
self.monitor_op = np.less
self.best = baseline
elif mode == 'max':
self.monitor_op = np.greater
self.best = baseline
else:
print('Error. mode for metric must be either min or max')
sys.exit(1)
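# A minimal sketch of the baseline-aware comparison behind MyModelCheckpoint: the comparison operator
# and the initial best value depend on whether the monitored metric should be minimized or maximized,
# so weights are only saved once the baseline (e.g. the performance of reloaded weights) is beaten.
# The metric values below are made up for illustration; this helper is not part of the original pipeline.
def _baseline_checkpoint_sketch(mode='max', baseline=0.70, current=0.72):
    import numpy as np
    monitor_op = np.less if mode == 'min' else np.greater
    best = baseline
    return bool(monitor_op(current, best))  # True means the checkpoint would save the weights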
class DeepLearning(Metrics):
"""
Core helper class to train models. Used to:
- build the data generators
- generate the CNN architectures
- load the weights
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, debug_mode=False):
# Initialization
Metrics.__init__(self)
tf.random.set_seed(self.seed)
# Model's version
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.architecture = architecture
self.n_fc_layers = int(n_fc_layers)
self.n_fc_nodes = int(n_fc_nodes)
self.optimizer = optimizer
self.learning_rate = float(learning_rate)
self.weight_decay = float(weight_decay)
self.dropout_rate = float(dropout_rate)
self.data_augmentation_factor = float(data_augmentation_factor)
self.outer_fold = None
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# NNet's architecture and weights
self.side_predictors = self.dict_side_predictors[target]
if self.organ + '_' + self.view in self.left_right_organs_views:
self.side_predictors.append('organ_side')
self.dict_final_activations = {'regression': 'linear', 'binary': 'sigmoid', 'multiclass': 'softmax',
'saliency': 'linear'}
self.path_load_weights = None
self.keras_weights = None
# Generators
self.debug_mode = debug_mode
self.debug_fraction = 0.005
self.DATA_FEATURES = {}
self.mode = None
self.n_cpus = len(os.sched_getaffinity(0))
self.dir_images = '../images/' + organ + '/' + view + '/' + transformation + '/'
# define dictionary to fit the architecture's input size to the images sizes (take min (height, width))
self.dict_organ_view_transformation_to_image_size = {
'Eyes_Fundus_Raw': (316, 316), # initial size (1388, 1388)
'Eyes_OCT_Raw': (312, 320), # initial size (500, 512)
'Musculoskeletal_Spine_Sagittal': (466, 211), # initial size (1513, 684)
'Musculoskeletal_Spine_Coronal': (315, 313), # initial size (724, 720)
'Musculoskeletal_Hips_MRI': (329, 303), # initial size (626, 680)
'Musculoskeletal_Knees_MRI': (347, 286) # initial size (851, 700)
}
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Brain_MRI_SagittalRaw', 'Brain_MRI_SagittalReference', 'Brain_MRI_CoronalRaw',
'Brain_MRI_CoronalReference', 'Brain_MRI_TransverseRaw', 'Brain_MRI_TransverseReference'],
(316, 316))) # initial size (88, 88)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Arterial_Carotids_Mixed', 'Arterial_Carotids_LongAxis', 'Arterial_Carotids_CIMT120',
'Arterial_Carotids_CIMT150', 'Arterial_Carotids_ShortAxis'],
(337, 291))) # initial size (505, 436)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Heart_MRI_2chambersRaw', 'Heart_MRI_2chambersContrast', 'Heart_MRI_3chambersRaw',
'Heart_MRI_3chambersContrast', 'Heart_MRI_4chambersRaw', 'Heart_MRI_4chambersContrast'],
(316, 316))) # initial size (200, 200)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Liver_Raw', 'Abdomen_Liver_Contrast'], (288, 364))) # initial size (288, 364)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Pancreas_Raw', 'Abdomen_Pancreas_Contrast'], (288, 350))) # initial size (288, 350)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Musculoskeletal_FullBody_Figure', 'Musculoskeletal_FullBody_Skeleton',
'Musculoskeletal_FullBody_Flesh', 'Musculoskeletal_FullBody_Mixed'],
(541, 181))) # initial size (811, 272)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['PhysicalActivity_FullWeek_GramianAngularField1minDifference',
'PhysicalActivity_FullWeek_GramianAngularField1minSummation',
'PhysicalActivity_FullWeek_MarkovTransitionField1min',
'PhysicalActivity_FullWeek_RecurrencePlots1min'],
(316, 316))) # initial size (316, 316)
self.dict_architecture_to_image_size = {'MobileNet': (224, 224), 'MobileNetV2': (224, 224),
'NASNetMobile': (224, 224), 'NASNetLarge': (331, 331)}
if self.architecture in ['MobileNet', 'MobileNetV2', 'NASNetMobile', 'NASNetLarge']:
self.image_width, self.image_height = self.dict_architecture_to_image_size[architecture]
else:
self.image_width, self.image_height = \
self.dict_organ_view_transformation_to_image_size[organ + '_' + view + '_' + transformation]
# define dictionary of batch sizes to fit as many samples as the model's architecture allows
self.dict_batch_sizes = {
# Default, applies to all images with resized input ~100,000 pixels
'Default': {'VGG16': 32, 'VGG19': 32, 'DenseNet121': 16, 'DenseNet169': 16, 'DenseNet201': 16,
'Xception': 32, 'InceptionV3': 32, 'InceptionResNetV2': 8, 'ResNet50': 32, 'ResNet101': 16,
'ResNet152': 16, 'ResNet50V2': 32, 'ResNet101V2': 16, 'ResNet152V2': 16, 'ResNeXt50': 4,
'ResNeXt101': 8, 'EfficientNetB7': 4,
'MobileNet': 128, 'MobileNetV2': 64, 'NASNetMobile': 64, 'NASNetLarge': 4}}
# Define batch size
if organ + '_' + view in self.dict_batch_sizes.keys():
self.batch_size = self.dict_batch_sizes[organ + '_' + view][architecture]
else:
self.batch_size = self.dict_batch_sizes['Default'][architecture]
# double the batch size for the teslaM40 cores that have bigger memory
if len(GPUtil.getGPUs()) > 0:  # make sure GPUs are available (not always true, e.g. when debugging)
if GPUtil.getGPUs()[0].memoryTotal > 20000:
self.batch_size *= 2
# Define number of ids per batch (halved for paired organs, because each id contributes a left and a right sample)
self.n_ids_batch = self.batch_size
if organ + '_' + view in self.left_right_organs_views:
self.n_ids_batch //= 2
# Define number of samples per subepoch
if debug_mode:
self.n_samples_per_subepoch = self.batch_size * 4
else:
self.n_samples_per_subepoch = 32768
if organ + '_' + view in self.left_right_organs_views:
self.n_samples_per_subepoch //= 2
# dict to decide which field is used to generate the ids when several targets share the same ids
self.dict_target_to_ids = dict.fromkeys(['Age', 'Sex'], 'Age')
# Note: R-Squared and F1-Score are not available, because their batch based values are misleading.
# For some reason, Sensitivity and Specificity are not available either. Might implement later.
self.dict_losses_K = {'MSE': MeanSquaredError(name='MSE'),
'Binary-Crossentropy': BinaryCrossentropy(name='Binary-Crossentropy')}
self.dict_metrics_K = {'R-Squared': RSquare(name='R-Squared', y_shape=(1,)),
'RMSE': RootMeanSquaredError(name='RMSE'),
'F1-Score': F1Score(name='F1-Score', num_classes=1, dtype=tf.float32),
'ROC-AUC': AUC(curve='ROC', name='ROC-AUC'),
'PR-AUC': AUC(curve='PR', name='PR-AUC'),
'Binary-Accuracy': BinaryAccuracy(name='Binary-Accuracy'),
'Precision': Precision(name='Precision'),
'Recall': Recall(name='Recall'),
'True-Positives': TruePositives(name='True-Positives'),
'False-Positives': FalsePositives(name='False-Positives'),
'False-Negatives': FalseNegatives(name='False-Negatives'),
'True-Negatives': TrueNegatives(name='True-Negatives')}
# Metrics
self.prediction_type = self.dict_prediction_types[target]
self.loss_name = self.dict_losses_names[self.prediction_type]
self.loss_function = self.dict_losses_K[self.loss_name]
self.main_metric_name = self.dict_main_metrics_names_K[target]
self.main_metric_mode = self.main_metrics_modes[self.main_metric_name]
self.main_metric = self.dict_metrics_K[self.main_metric_name]
self.metrics_names = [self.main_metric_name]
self.metrics = [self.dict_metrics_K[metric_name] for metric_name in self.metrics_names]
# Optimizers
self.optimizers = {'Adam': Adam, 'RMSprop': RMSprop, 'Adadelta': Adadelta}
# Model
self.model = None
@staticmethod
def _append_ext(fn):
return fn + ".jpg"
def _load_data_features(self):
for fold in self.folds:
self.DATA_FEATURES[fold] = pd.read_csv(
self.path_data + 'data-features_' + self.organ + '_' + self.view + '_' + self.transformation + '_' +
self.dict_target_to_ids[self.target] + '_' + fold + '_' + self.outer_fold + '.csv')
for col_name in self.id_vars:
self.DATA_FEATURES[fold][col_name] = self.DATA_FEATURES[fold][col_name].astype(str)
self.DATA_FEATURES[fold].set_index('id', drop=False, inplace=True)
def _take_subset_to_debug(self):
for fold in self.folds:
# use +1 or +2 to test the leftovers pipeline
leftovers_extra = {'train': 0, 'val': 1, 'test': 2}
n_batches = 2
n_limit_fold = leftovers_extra[fold] + self.batch_size * n_batches
self.DATA_FEATURES[fold] = self.DATA_FEATURES[fold].iloc[:n_limit_fold, :]
def _generate_generators(self, DATA_FEATURES):
GENERATORS = {}
for fold in self.folds:
# do not generate a generator if there are no samples (can happen for leftovers generators)
if fold not in DATA_FEATURES.keys():
continue
# parameters
training_mode = True if self.mode == 'model_training' else False
if (fold == 'train') & (self.mode == 'model_training') & \
(self.organ + '_' + self.view not in self.organsviews_not_to_augment):
data_augmentation = True
else:
data_augmentation = False
# define batch size for testing: data is split between a part that fits in batches, and leftovers
if self.mode == 'model_testing':
if self.organ + '_' + self.view in self.left_right_organs_views:
n_samples = len(DATA_FEATURES[fold].index) * 2
else:
n_samples = len(DATA_FEATURES[fold].index)
batch_size_fold = min(self.batch_size, n_samples)
else:
batch_size_fold = self.batch_size
if (fold == 'train') & (self.mode == 'model_training'):
n_samples_per_subepoch = self.n_samples_per_subepoch
else:
n_samples_per_subepoch = None
# generator
GENERATORS[fold] = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=DATA_FEATURES[fold], n_samples_per_subepoch=n_samples_per_subepoch,
batch_size=batch_size_fold, training_mode=training_mode,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=data_augmentation,
data_augmentation_factor=self.data_augmentation_factor, seed=self.seed)
return GENERATORS
def _generate_class_weights(self):
if self.dict_prediction_types[self.target] == 'binary':
self.class_weights = {}
counts = self.DATA_FEATURES['train'][self.target + '_raw'].value_counts()
n_total = counts.sum()
# weight each class inversely proportionally to its prevalence, normalized by the number of classes so the weights stay on the order of 1
for i in counts.index.values:
self.class_weights[i] = n_total / (counts.loc[i] * len(counts.index))
def _generate_cnn(self):
# define the arguments
# use noisy-student initial weights for EfficientNetB7 (they perform better than the imagenet weights)
if (self.architecture == 'EfficientNetB7') & (self.keras_weights == 'imagenet'):
w = 'noisy-student'
else:
w = self.keras_weights
kwargs = {"include_top": False, "weights": w, "input_shape": (self.image_width, self.image_height, 3)}
if self.architecture in ['ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2', 'ResNet152V2',
'ResNeXt50', 'ResNeXt101']:
import tensorflow.keras
kwargs.update(
{"backend": tensorflow.keras.backend, "layers": tensorflow.keras.layers,
"models": tensorflow.keras.models, "utils": tensorflow.keras.utils})
# load the architecture builder
if self.architecture == 'VGG16':
from tensorflow.keras.applications.vgg16 import VGG16 as ModelBuilder
elif self.architecture == 'VGG19':
from tensorflow.keras.applications.vgg19 import VGG19 as ModelBuilder
elif self.architecture == 'DenseNet121':
from tensorflow.keras.applications.densenet import DenseNet121 as ModelBuilder
elif self.architecture == 'DenseNet169':
from tensorflow.keras.applications.densenet import DenseNet169 as ModelBuilder
elif self.architecture == 'DenseNet201':
from tensorflow.keras.applications.densenet import DenseNet201 as ModelBuilder
elif self.architecture == 'Xception':
from tensorflow.keras.applications.xception import Xception as ModelBuilder
elif self.architecture == 'InceptionV3':
from tensorflow.keras.applications.inception_v3 import InceptionV3 as ModelBuilder
elif self.architecture == 'InceptionResNetV2':
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 as ModelBuilder
elif self.architecture == 'ResNet50':
from keras_applications.resnet import ResNet50 as ModelBuilder
elif self.architecture == 'ResNet101':
from keras_applications.resnet import ResNet101 as ModelBuilder
elif self.architecture == 'ResNet152':
from keras_applications.resnet import ResNet152 as ModelBuilder
elif self.architecture == 'ResNet50V2':
from keras_applications.resnet_v2 import ResNet50V2 as ModelBuilder
elif self.architecture == 'ResNet101V2':
from keras_applications.resnet_v2 import ResNet101V2 as ModelBuilder
elif self.architecture == 'ResNet152V2':
from keras_applications.resnet_v2 import ResNet152V2 as ModelBuilder
elif self.architecture == 'ResNeXt50':
from keras_applications.resnext import ResNeXt50 as ModelBuilder
elif self.architecture == 'ResNeXt101':
from keras_applications.resnext import ResNeXt101 as ModelBuilder
elif self.architecture == 'EfficientNetB7':
from efficientnet.tfkeras import EfficientNetB7 as ModelBuilder
# The following models have a fixed input size requirement
elif self.architecture == 'NASNetMobile':
from tensorflow.keras.applications.nasnet import NASNetMobile as ModelBuilder
elif self.architecture == 'NASNetLarge':
from tensorflow.keras.applications.nasnet import NASNetLarge as ModelBuilder
elif self.architecture == 'MobileNet':
from tensorflow.keras.applications.mobilenet import MobileNet as ModelBuilder
elif self.architecture == 'MobileNetV2':
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 as ModelBuilder
else:
print('Architecture does not exist.')
sys.exit(1)
# build the model's base
cnn = ModelBuilder(**kwargs)
x = cnn.output
# complete the model's base
if self.architecture in ['VGG16', 'VGG19']:
x = Flatten()(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
else:
x = GlobalAveragePooling2D()(x)
if self.architecture == 'EfficientNetB7':
x = Dropout(self.dropout_rate)(x)
cnn_output = x
return cnn.input, cnn_output
def _generate_side_nn(self):
side_nn = Sequential()
side_nn.add(Dense(16, input_dim=len(self.side_predictors), activation="relu",
kernel_regularizer=regularizers.l2(self.weight_decay)))
return side_nn.input, side_nn.output
def _complete_architecture(self, cnn_input, cnn_output, side_nn_input, side_nn_output):
x = concatenate([cnn_output, side_nn_output])
x = Dropout(self.dropout_rate)(x)
for n in [int(self.n_fc_nodes * (2 ** (2 * (self.n_fc_layers - 1 - i)))) for i in range(self.n_fc_layers)]:
x = Dense(n, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
# scale the dropout proportionally to the number of nodes in a layer. No dropout for the last layers
if n > 16:
x = Dropout(self.dropout_rate * n / 1024)(x)
predictions = Dense(1, activation=self.dict_final_activations[self.prediction_type],
kernel_regularizer=regularizers.l2(self.weight_decay))(x)
self.model = Model(inputs=[cnn_input, side_nn_input], outputs=predictions)
def _generate_architecture(self):
cnn_input, cnn_output = self._generate_cnn()
side_nn_input, side_nn_output = self._generate_side_nn()
self._complete_architecture(cnn_input=cnn_input, cnn_output=cnn_output, side_nn_input=side_nn_input,
side_nn_output=side_nn_output)
def _load_model_weights(self):
try:
self.model.load_weights(self.path_load_weights)
except (FileNotFoundError, TypeError):
# load backup weights if the main weights are corrupted
try:
self.model.load_weights(self.path_load_weights.replace('model-weights', 'backup-model-weights'))
except FileNotFoundError:
print('Error. No file was found. imagenet weights should have been used. Bug somewhere.')
sys.exit(1)
@staticmethod
def clean_exit():
# exit
print('\nDone.\n')
print('Killing JOB PID with kill...')
os.system('touch ../eo/' + os.environ['SLURM_JOBID'])
os.system('kill ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB PID with kill -9...')
os.system('kill -9 ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB ID')
os.system('scancel ' + os.environ['SLURM_JOBID'])
time.sleep(60)
print('Everything failed to kill the job. Hanging there until hitting walltime...')
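# A standalone sketch of the fully connected head sizing in DeepLearning._complete_architecture: layer i
# gets n_fc_nodes * 2 ** (2 * (n_fc_layers - 1 - i)) nodes (a factor of 4 between consecutive layers),
# and dropout is scaled as dropout_rate * n / 1024 and skipped when n <= 16. The parameter values below
# are illustrative only; this helper is not part of the original pipeline.
def _fc_head_sizing_sketch(n_fc_layers=3, n_fc_nodes=64, dropout_rate=0.5):
    sizes = [int(n_fc_nodes * (2 ** (2 * (n_fc_layers - 1 - i)))) for i in range(n_fc_layers)]
    dropouts = [dropout_rate * n / 1024 if n > 16 else 0.0 for n in sizes]
    return list(zip(sizes, dropouts))  # e.g. [(1024, 0.5), (256, 0.125), (64, 0.03125)]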
class Training(DeepLearning):
"""
Class to train CNN models:
- Generates the architecture
- Loads the best last weights so that a model can be trained over several jobs
- Generates the callbacks
- Compiles the model
- Trains the model
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False, transfer_learning=None,
continue_training=True, display_full_metrics=True):
# parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.version = self.version + '_' + str(outer_fold)
# NNet's architecture's weights
self.continue_training = continue_training
self.transfer_learning = transfer_learning
self.list_parameters_to_match = ['organ', 'transformation', 'view']
# dict to decide in which order targets should be used when trying to transfer weights from a similar model
self.dict_alternative_targets_for_transfer_learning = {'Age': ['Age', 'Sex'], 'Sex': ['Sex', 'Age']}
# Generators
self.folds = ['train', 'val']
self.mode = 'model_training'
self.class_weights = None
self.GENERATORS = None
# Metrics
self.baseline_performance = None
if display_full_metrics:
self.metrics_names = self.dict_metrics_names_K[self.prediction_type]
# Model
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '.h5'
if debug_mode:
self.path_save_weights = self.path_data + 'model-weights-debug.h5'
else:
self.path_save_weights = self.path_data + 'model-weights_' + self.version + '.h5'
self.n_epochs_max = 100000
self.callbacks = None
# Load and preprocess the data, build the generators
def data_preprocessing(self):
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._generate_class_weights()
self.GENERATORS = self._generate_generators(self.DATA_FEATURES)
# Determine which weights to load, if any.
def _weights_for_transfer_learning(self):
print('Looking for models to transfer weights from...')
# define parameters
parameters = self._version_to_parameters(self.version)
# continue training if possible
if self.continue_training and os.path.exists(self.path_load_weights):
print('Loading the weights from the model\'s previous training iteration.')
return
# Initialize the weights using the weights from other successful hyperparameter combinations
if self.transfer_learning == 'hyperparameters':
# Check if the same model with other hyperparameters has already been trained. Pick the best for transfer.
params = self.version.split('_')
params_tl_idx = \
[i for i in range(len(names_model_parameters))
if any(names_model_parameters[i] == p for p in
['optimizer', 'learning_rate', 'weight_decay', 'dropout_rate', 'data_augmentation_factor'])]
for idx in params_tl_idx:
params[idx] = '*'
versions = '../eo/MI02_' + '_'.join(params) + '.out'
files = glob.glob(versions)
if self.main_metric_mode == 'min':
best_perf = np.Inf
else:
best_perf = -np.Inf
for file in files:
hand = open(file, 'r')
# find best last performance
final_improvement_line = None
baseline_performance_line = None
for line in hand:
line = line.rstrip()
if re.search('Baseline validation ' + self.main_metric_name + ' = ', line):
baseline_performance_line = line
if re.search('val_' + self.main_metric_name + ' improved from', line):
final_improvement_line = line
hand.close()
if final_improvement_line is not None:
perf = float(final_improvement_line.split(' ')[7].replace(',', ''))
elif baseline_performance_line is not None:
perf = float(baseline_performance_line.split(' ')[-1])
else:
continue
# Keep track of the file with the best performance
if self.main_metric_mode == 'min':
update = perf < best_perf
else:
update = perf > best_perf
if update:
best_perf = perf
self.path_load_weights = \
file.replace('../eo/', self.path_data).replace('MI02', 'model-weights').replace('.out', '.h5')
if best_perf not in [-np.Inf, np.Inf]:
print('Transferring the weights from: ' + self.path_load_weights + ', with ' + self.main_metric_name +
' = ' + str(best_perf))
return
# Initialize the weights based on models trained on different datasets, ranked by similarity
if self.transfer_learning == 'datasets':
while True:
# print('Matching models for the following criterias:');
# print(['architecture', 'target'] + list_parameters_to_match)
# start by looking for models trained on the same target, then move to other targets
for target_to_load in self.dict_alternative_targets_for_transfer_learning[parameters['target']]:
# print('Target used: ' + target_to_load)
parameters_to_match = parameters.copy()
parameters_to_match['target'] = target_to_load
# load the ranked performances table to select the best performing model among the similar
# models available
path_performances_to_load = self.path_data + 'PERFORMANCES_ranked_' + \
parameters_to_match['target'] + '_' + 'val' + '.csv'
try:
Performances = pd.read_csv(path_performances_to_load)
Performances['organ'] = Performances['organ'].astype(str)
except FileNotFoundError:
# print("Could not load the file: " + path_performances_to_load)
break
# iteratively get rid of models that are not similar enough, based on the list
for parameter in ['architecture', 'target'] + self.list_parameters_to_match:
Performances = Performances[Performances[parameter] == parameters_to_match[parameter]]
# if at least one model is similar enough, load weights from the best of them
if len(Performances.index) != 0:
self.path_load_weights = self.path_data + 'model-weights_' + Performances['version'].values[0] + '.h5'
self.keras_weights = None
print('Transferring the weights from: ' + self.path_load_weights)
return
# if no similar model was found, try again after getting rid of the last selection criteria
if len(self.list_parameters_to_match) == 0:
print('No model found for transfer learning.')
break
self.list_parameters_to_match.pop()
# Otherwise use imagenet weights to initialize
print('Using imagenet weights.')
# no weights file to load in this case; the imagenet initialization is handled through keras_weights below
self.path_load_weights = None
self.keras_weights = 'imagenet'
def _compile_model(self):
# if learning rate was reduced with success according to logger, start with this reduced learning rate
if self.path_load_weights is not None:
path_logger = self.path_load_weights.replace('model-weights', 'logger').replace('.h5', '.csv')
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
if os.path.exists(path_logger):
try:
logger = pd.read_csv(path_logger)
best_log = \
logger[logger['val_' + self.main_metric_name] == logger['val_' + self.main_metric_name].max()]
lr = best_log['learning_rate'].values[0]
except pd.errors.EmptyDataError:
os.remove(path_logger)
lr = self.learning_rate
else:
lr = self.learning_rate
self.model.compile(optimizer=self.optimizers[self.optimizer](lr=lr, clipnorm=1.0), loss=self.loss_function,
metrics=self.metrics)
def _compute_baseline_performance(self):
# calculate initial val_loss value
if self.continue_training:
idx_metric_name = ([self.loss_name] + self.metrics_names).index(self.main_metric_name)
baseline_perfs = self.model.evaluate(self.GENERATORS['val'], steps=self.GENERATORS['val'].steps)
self.baseline_performance = baseline_perfs[idx_metric_name]
elif self.main_metric_mode == 'min':
self.baseline_performance = np.Inf
else:
self.baseline_performance = -np.Inf
print('Baseline validation ' + self.main_metric_name + ' = ' + str(self.baseline_performance))
def _define_callbacks(self):
if self.debug_mode:
path_logger = self.path_data + 'logger-debug.csv'
append = False
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
append = self.continue_training
csv_logger = MyCSVLogger(path_logger, separator=',', append=append)
model_checkpoint_backup = MyModelCheckpoint(self.path_save_weights.replace('model-weights',
'backup-model-weights'),
monitor='val_' + self.main_metric.name,
baseline=self.baseline_performance, verbose=1, save_best_only=True,
save_weights_only=True, mode=self.main_metric_mode,
save_freq='epoch')
model_checkpoint = MyModelCheckpoint(self.path_save_weights,
monitor='val_' + self.main_metric.name, baseline=self.baseline_performance,
verbose=1, save_best_only=True, save_weights_only=True,
mode=self.main_metric_mode, save_freq='epoch')
patience_reduce_lr = min(7, 3 * self.GENERATORS['train'].n_subepochs_per_epoch)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=patience_reduce_lr, verbose=1,
mode='min', min_delta=0, cooldown=0, min_lr=0)
early_stopping = EarlyStopping(monitor='val_' + self.main_metric.name, min_delta=0, patience=15, verbose=0,
mode=self.main_metric_mode, baseline=self.baseline_performance,
restore_best_weights=True)
self.callbacks = [csv_logger, model_checkpoint_backup, model_checkpoint, early_stopping, reduce_lr_on_plateau]
def build_model(self):
self._weights_for_transfer_learning()
self._generate_architecture()
# Load weights if possible
try:
load_weights = True if os.path.exists(self.path_load_weights) else False
except TypeError:
load_weights = False
if load_weights:
self._load_model_weights()
else:
# save transferred weights as default, in case no better weights are found
self.model.save_weights(self.path_save_weights.replace('model-weights', 'backup-model-weights'))
self.model.save_weights(self.path_save_weights)
self._compile_model()
self._compute_baseline_performance()
self._define_callbacks()
def train_model(self):
# garbage collector
_ = gc.collect()
# use more verbose when debugging
verbose = 1 if self.debug_mode else 2
# train the model
self.model.fit(self.GENERATORS['train'], steps_per_epoch=self.GENERATORS['train'].steps,
validation_data=self.GENERATORS['val'], validation_steps=self.GENERATORS['val'].steps,
shuffle=False, use_multiprocessing=False, workers=self.n_cpus, epochs=self.n_epochs_max,
class_weight=self.class_weights, callbacks=self.callbacks, verbose=verbose)
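# A minimal sketch of the class weighting used in Training._generate_class_weights for binary targets:
# each class is weighted inversely to its prevalence and normalized by the number of classes, so a
# balanced dataset yields weights of 1.0. The counts below are made up for illustration; this helper is
# not part of the original pipeline.
def _class_weights_sketch():
    import pandas as pd
    labels = pd.Series([0] * 900 + [1] * 100)  # hypothetical imbalanced binary target
    counts = labels.value_counts()
    n_total = counts.sum()
    return {i: n_total / (counts.loc[i] * len(counts.index)) for i in counts.index.values}  # {0: ~0.56, 1: 5.0}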
class PredictionsGenerate(DeepLearning):
"""
Generates the predictions for each model.
Unscales the predictions.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False):
# Initialize parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.mode = 'model_testing'
# Define dictionaries attributes for data, generators and predictions
self.DATA_FEATURES_BATCH = {}
self.DATA_FEATURES_LEFTOVERS = {}
self.GENERATORS_BATCH = None
self.GENERATORS_LEFTOVERS = None
self.PREDICTIONS = {}
def _split_batch_leftovers(self):
# split the samples into two groups: what can fit into the batch size, and the leftovers.
for fold in self.folds:
n_leftovers = len(self.DATA_FEATURES[fold].index) % self.n_ids_batch
if n_leftovers > 0:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold].iloc[:-n_leftovers]
self.DATA_FEATURES_LEFTOVERS[fold] = self.DATA_FEATURES[fold].tail(n_leftovers)
else:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold] # special case for syntax if no leftovers
if fold in self.DATA_FEATURES_LEFTOVERS.keys():
del self.DATA_FEATURES_LEFTOVERS[fold]
def _generate_outerfold_predictions(self):
# prepare unscaling
if self.target in self.targets_regression:
mean_train = self.DATA_FEATURES['train'][self.target + '_raw'].mean()
std_train = self.DATA_FEATURES['train'][self.target + '_raw'].std()
else:
mean_train, std_train = None, None
# Generate predictions
for fold in self.folds:
print('Predicting samples from fold ' + fold + '.')
print(str(len(self.DATA_FEATURES[fold].index)) + ' samples to predict.')
print('Predicting batches: ' + str(len(self.DATA_FEATURES_BATCH[fold].index)) + ' samples.')
pred_batch = self.model.predict(self.GENERATORS_BATCH[fold], steps=self.GENERATORS_BATCH[fold].steps,
verbose=1)
if fold in self.GENERATORS_LEFTOVERS.keys():
print('Predicting leftovers: ' + str(len(self.DATA_FEATURES_LEFTOVERS[fold].index)) + ' samples.')
pred_leftovers = self.model.predict(self.GENERATORS_LEFTOVERS[fold],
steps=self.GENERATORS_LEFTOVERS[fold].steps, verbose=1)
pred_full = np.concatenate((pred_batch, pred_leftovers)).squeeze()
else:
pred_full = pred_batch.squeeze()
print('Predicted a total of ' + str(len(pred_full)) + ' samples.')
# take the average between left and right predictions for paired organs
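            # reshaping to (-1, 2) pairs consecutive predictions, assuming the generator yields the left and
            # right images of each sample back to back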
if self.organ + '_' + self.view in self.left_right_organs_views:
pred_full = np.mean(pred_full.reshape(-1, 2), axis=1)
# unscale predictions
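            # the regression target was presumably standardized with the training fold's mean and std, so
            # pred * std_train + mean_train maps the predictions back to the target's original scale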
if self.target in self.targets_regression:
pred_full = pred_full * std_train + mean_train
# format the dataframe
self.DATA_FEATURES[fold]['pred'] = pred_full
self.PREDICTIONS[fold] = self.DATA_FEATURES[fold]
self.PREDICTIONS[fold]['id'] = [ID.replace('.jpg', '') for ID in self.PREDICTIONS[fold]['id']]
def _generate_predictions(self):
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '_' + self.outer_fold + '.h5'
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._load_model_weights()
self._split_batch_leftovers()
# generate the generators
self.GENERATORS_BATCH = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_BATCH)
        if len(self.DATA_FEATURES_LEFTOVERS) > 0:
self.GENERATORS_LEFTOVERS = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_LEFTOVERS)
self._generate_outerfold_predictions()
def _format_predictions(self):
for fold in self.folds:
perf_fun = self.dict_metrics_sklearn[self.dict_main_metrics_names[self.target]]
perf = perf_fun(self.PREDICTIONS[fold][self.target + '_raw'], self.PREDICTIONS[fold]['pred'])
print('The ' + fold + ' performance is: ' + str(perf))
# format the predictions
self.PREDICTIONS[fold].index.name = 'column_names'
self.PREDICTIONS[fold] = self.PREDICTIONS[fold][['id', 'outer_fold', 'pred']]
def generate_predictions(self):
self._generate_architecture()
self._generate_predictions()
self._format_predictions()
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold + '_'
+ self.outer_fold + '.csv', index=False)
class PredictionsConcatenate(Basics):
"""
Concatenates the predictions coming from the different cross validation folds.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None):
# Initialize parameters
Basics.__init__(self)
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# Define dictionaries attributes for data, generators and predictions
self.PREDICTIONS = {}
def concatenate_predictions(self):
for fold in self.folds:
for outer_fold in self.outer_folds:
Predictions_fold = pd.read_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'_' + outer_fold + '.csv')
if fold in self.PREDICTIONS.keys():
self.PREDICTIONS[fold] = pd.concat([self.PREDICTIONS[fold], Predictions_fold])
else:
self.PREDICTIONS[fold] = Predictions_fold
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'.csv', index=False)
class PredictionsMerge(Basics):
"""
Merges the predictions from all models into a unified dataframe.
"""
def __init__(self, target=None, fold=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.data_features = None
self.list_models = None
self.Predictions_df_previous = None
self.Predictions_df = None
def _load_data_features(self):
self.data_features = pd.read_csv(self.path_data + 'data-features_instances.csv',
usecols=self.id_vars + self.demographic_vars)
for var in self.id_vars:
self.data_features[var] = self.data_features[var].astype(str)
self.data_features.set_index('id', drop=False, inplace=True)
self.data_features.index.name = 'column_names'
def _preprocess_data_features(self):
# For the training set, each sample is predicted n_CV_outer_folds times, so prepare a larger dataframe
if self.fold == 'train':
df_all_folds = None
for outer_fold in self.outer_folds:
df_fold = self.data_features.copy()
df_all_folds = df_fold if outer_fold == self.outer_folds[0] else df_all_folds.append(df_fold)
self.data_features = df_all_folds
def _load_previous_merged_predictions(self):
if os.path.exists(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold +
'.csv'):
self.Predictions_df_previous = pd.read_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' +
self.target + '_' + self.fold + '.csv')
self.Predictions_df_previous.drop(columns=['eid', 'instance'] + self.demographic_vars, inplace=True)
def _list_models(self):
# generate list of predictions that will be integrated in the Predictions dataframe
self.list_models = glob.glob(self.path_data + 'Predictions_instances_' + self.target + '_*_' + self.fold +
'.csv')
# get rid of ensemble models and models already merged
self.list_models = [model for model in self.list_models if ('*' not in model)]
if self.Predictions_df_previous is not None:
self.list_models = \
[model for model in self.list_models
if ('pred_' + '_'.join(model.split('_')[2:-1]) not in self.Predictions_df_previous.columns)]
self.list_models.sort()
def preprocessing(self):
self._load_data_features()
self._preprocess_data_features()
self._load_previous_merged_predictions()
self._list_models()
def merge_predictions(self):
# merge the predictions
print('There are ' + str(len(self.list_models)) + ' models to merge.')
i = 0
# define subgroups to accelerate merging process
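        # the subgroup key is built from fields 3-6 of the file name, i.e. organ, view, transformation and
        # architecture under this naming scheme (assuming the data path itself contains no underscores)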
list_subgroups = list(set(['_'.join(model.split('_')[3:7]) for model in self.list_models]))
for subgroup in list_subgroups:
print('Merging models from the subgroup ' + subgroup)
models_subgroup = [model for model in self.list_models if subgroup in model]
Predictions_subgroup = None
# merge the models one by one
for file_name in models_subgroup:
i += 1
version = '_'.join(file_name.split('_')[2:-1])
if self.Predictions_df_previous is not None and \
'pred_' + version in self.Predictions_df_previous.columns:
print('The model ' + version + ' has already been merged before.')
else:
print('Merging the ' + str(i) + 'th model: ' + version)
# load csv and format the predictions
prediction = pd.read_csv(self.path_data + file_name)
print('raw prediction\'s shape: ' + str(prediction.shape))
for var in ['id', 'outer_fold']:
prediction[var] = prediction[var].apply(str)
prediction.rename(columns={'pred': 'pred_' + version}, inplace=True)
# merge data frames
if Predictions_subgroup is None:
Predictions_subgroup = prediction
elif self.fold == 'train':
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer',
on=['id', 'outer_fold'])
else:
prediction.drop(['outer_fold'], axis=1, inplace=True)
                        # not supported for pandas versions > 0.23.4 for now
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer', on=['id'])
# merge group predictions data frames
if self.fold != 'train':
Predictions_subgroup.drop(['outer_fold'], axis=1, inplace=True)
if Predictions_subgroup is not None:
if self.Predictions_df is None:
self.Predictions_df = Predictions_subgroup
elif self.fold == 'train':
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer',
on=['id', 'outer_fold'])
else:
                    # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer', on=['id'])
print('Predictions_df\'s shape: ' + str(self.Predictions_df.shape))
# garbage collector
gc.collect()
# Merge with the previously merged predictions
if (self.Predictions_df_previous is not None) & (self.Predictions_df is not None):
if self.fold == 'train':
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer',
on=['id', 'outer_fold'])
else:
self.Predictions_df.drop(columns=['outer_fold'], inplace=True)
                # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer', on=['id'])
self.Predictions_df_previous = None
elif self.Predictions_df is None:
print('No new models to merge. Exiting.')
print('Done.')
sys.exit(0)
# Reorder the columns alphabetically
pred_versions = [col for col in self.Predictions_df.columns if 'pred_' in col]
pred_versions.sort()
id_cols = ['id', 'outer_fold'] if self.fold == 'train' else ['id']
self.Predictions_df = self.Predictions_df[id_cols + pred_versions]
def postprocessing(self):
# get rid of useless rows in data_features before merging to keep the memory requirements as low as possible
self.data_features = self.data_features[self.data_features['id'].isin(self.Predictions_df['id'].values)]
# merge data_features and predictions
if self.fold == 'train':
print('Starting to merge a massive dataframe')
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id', 'outer_fold'])
else:
            # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id'])
print('Merging done')
# remove rows for which no prediction is available (should be none)
subset_cols = [col for col in self.Predictions_df.columns if 'pred_' in col]
self.Predictions_df.dropna(subset=subset_cols, how='all', inplace=True)
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_df.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_df[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
def save_merged_predictions(self):
print('Writing the merged predictions...')
self.Predictions_df.to_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' +
self.fold + '.csv', index=False)
class PredictionsEids(Basics):
"""
Computes the average age prediction across samples from different instances for every participant.
(Scaled back to instance 0)
"""
def __init__(self, target=None, fold=None, debug_mode=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.debug_mode = debug_mode
self.Predictions = None
self.Predictions_chunk = None
self.pred_versions = None
self.res_versions = None
self.target_0s = None
self.Predictions_eids = None
self.Predictions_eids_previous = None
self.pred_versions_previous = None
def preprocessing(self):
# Load predictions
self.Predictions = pd.read_csv(
self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold + '.csv')
self.Predictions.drop(columns=['id'], inplace=True)
self.Predictions['eid'] = self.Predictions['eid'].astype(str)
self.Predictions.index.name = 'column_names'
self.pred_versions = [col for col in self.Predictions.columns.values if 'pred_' in col]
# Prepare target values on instance 0 as a reference
target_0s = pd.read_csv(self.path_data + 'data-features_eids.csv', usecols=['eid', self.target])
target_0s['eid'] = target_0s['eid'].astype(str)
target_0s.set_index('eid', inplace=True)
target_0s = target_0s[self.target]
target_0s.name = 'target_0'
target_0s = target_0s[self.Predictions['eid'].unique()]
self.Predictions = self.Predictions.merge(target_0s, on='eid')
# Compute biological ages reported to target_0
for pred in self.pred_versions:
            # Compute the bias of the predictions as a function of age
print('Generating residuals for model ' + pred.replace('pred_', ''))
df_model = self.Predictions[['Age', pred]]
df_model.dropna(inplace=True)
if (len(df_model.index)) > 0:
age = df_model.loc[:, ['Age']]
res = df_model['Age'] - df_model[pred]
regr = LinearRegression()
regr.fit(age, res)
self.Predictions[pred.replace('pred_', 'correction_')] = regr.predict(self.Predictions[['Age']])
# Take the residuals bias into account when "translating" the prediction to instance 0
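                # the shift is (target_0 - current target) plus the difference between the estimated
                # age-dependent bias at the current age and at the instance 0 age, making predictions
                # comparable across instances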
correction = self.Predictions['target_0'] - self.Predictions[self.target] + \
regr.predict(self.Predictions[['Age']]) - regr.predict(self.Predictions[['target_0']])
self.Predictions[pred] = self.Predictions[pred] + correction
self.Predictions[self.target] = self.Predictions['target_0']
self.Predictions.drop(columns=['target_0'], inplace=True)
self.Predictions.index.name = 'column_names'
def processing(self):
if self.fold == 'train':
# Prepare template to which each model will be appended
Predictions = self.Predictions[['eid'] + self.demographic_vars]
Predictions = Predictions.groupby('eid', as_index=True).mean()
Predictions.index.name = 'column_names'
Predictions['eid'] = Predictions.index.values
Predictions['instance'] = '*'
Predictions['id'] = Predictions['eid'] + '_*'
self.Predictions_eids = Predictions.copy()
self.Predictions_eids['outer_fold'] = -1
for i in range(self.n_CV_outer_folds):
Predictions_i = Predictions.copy()
Predictions_i['outer_fold'] = i
self.Predictions_eids = self.Predictions_eids.append(Predictions_i)
# Append each model one by one because the folds are different
print(str(len(self.pred_versions)) + ' models to compute.')
for pred_version in self.pred_versions:
                if self.pred_versions_previous is not None and pred_version in self.pred_versions_previous:
print(pred_version.replace('pred_', '') + ' had already been computed.')
else:
print("Computing results for version " + pred_version.replace('pred_', ''))
Predictions_version = self.Predictions[['eid', pred_version, 'outer_fold']]
# Use placeholder for NaN in outer_folds
Predictions_version['outer_fold'][Predictions_version['outer_fold'].isna()] = -1
Predictions_version_eids = Predictions_version.groupby(['eid', 'outer_fold'], as_index=False).mean()
self.Predictions_eids = self.Predictions_eids.merge(Predictions_version_eids,
on=['eid', 'outer_fold'], how='outer')
                    # per-version outer fold column (matching the 'outer_fold_<version>' columns used downstream)
                    of_version = pred_version.replace('pred_', 'outer_fold_')
                    self.Predictions_eids[of_version] = self.Predictions_eids['outer_fold']
                    self.Predictions_eids[of_version][self.Predictions_eids[of_version] == -1] = np.nan
del Predictions_version
                    _ = gc.collect()
self.Predictions_eids.drop(columns=['outer_fold'], inplace=True)
else:
self.Predictions_eids = self.Predictions.groupby('eid').mean()
self.Predictions_eids['eid'] = self.Predictions_eids.index.values
self.Predictions_eids['instance'] = '*'
self.Predictions_eids['id'] = self.Predictions_eids['eid'].astype(str) + '_' + \
self.Predictions_eids['instance']
# Re-order the columns
self.Predictions_eids = self.Predictions_eids[self.id_vars + self.demographic_vars + self.pred_versions]
def postprocessing(self):
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_eids.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_eids[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
def _generate_single_model_predictions(self):
for pred_version in self.pred_versions:
path_save = \
self.path_data + 'Predictions_eids_' + '_'.join(pred_version.split('_')[1:]) + '_' + self.fold + '.csv'
# Generate only if does not exist already.
if not os.path.exists(path_save):
Predictions_version = self.Predictions_eids[['id', 'outer_fold', pred_version]]
Predictions_version.rename(columns={pred_version: 'pred'}, inplace=True)
Predictions_version.dropna(subset=['pred'], inplace=True)
Predictions_version.to_csv(path_save, index=False)
def save_predictions(self):
self.Predictions_eids.to_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_eids_' + self.target + '_' +
self.fold + '.csv', index=False)
# Generate and save files for every single model
self._generate_single_model_predictions()
class PerformancesGenerate(Metrics):
"""
Computes the performances for each model.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, fold=None, pred_type=None, debug_mode=False):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.architecture = architecture
self.n_fc_layers = n_fc_layers
self.n_fc_nodes = n_fc_nodes
self.optimizer = optimizer
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.dropout_rate = dropout_rate
self.data_augmentation_factor = data_augmentation_factor
self.fold = fold
self.pred_type = pred_type
if debug_mode:
self.n_bootstrap_iterations = 3
else:
self.n_bootstrap_iterations = 1000
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
self.names_metrics = self.dict_metrics_names[self.dict_prediction_types[target]]
self.data_features = None
self.Predictions = None
self.PERFORMANCES = None
def _preprocess_data_features_predictions_for_performances(self):
# load dataset
data_features = pd.read_csv(self.path_data + 'data-features_' + self.pred_type + '.csv',
usecols=['id', 'Sex', 'Age'])
# format data_features to extract y
data_features.rename(columns={self.target: 'y'}, inplace=True)
data_features = data_features[['id', 'y']]
        data_features['id'] = data_features['id'].astype(str)
data_features.set_index('id', drop=False, inplace=True)
data_features.index.name = 'columns_names'
self.data_features = data_features
def _preprocess_predictions_for_performances(self):
Predictions = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' + self.version + '_' +
self.fold + '.csv')
Predictions['id'] = Predictions['id'].astype(str)
self.Predictions = Predictions.merge(self.data_features, how='inner', on=['id'])
# Initialize performances dataframes and compute sample sizes
def _initiate_empty_performances_df(self):
# Define an empty performances dataframe to store the performances computed
row_names = ['all'] + self.outer_folds
col_names_sample_sizes = ['N']
if self.target in self.targets_binary:
col_names_sample_sizes.extend(['N_0', 'N_1'])
col_names = ['outer_fold'] + col_names_sample_sizes
col_names.extend(self.names_metrics)
performances = np.empty((len(row_names), len(col_names),))
performances.fill(np.nan)
performances = pd.DataFrame(performances)
performances.index = row_names
performances.columns = col_names
performances['outer_fold'] = row_names
# Convert float to int for sample sizes and some metrics.
for col_name in col_names_sample_sizes:
            # a recent version of pandas is needed for the nullable 'Int64' type; otherwise NaN cannot be
            # stored in an integer column
performances[col_name] = performances[col_name].astype('Int64')
# compute sample sizes for the data frame
performances.loc['all', 'N'] = len(self.Predictions.index)
if self.target in self.targets_binary:
performances.loc['all', 'N_0'] = len(self.Predictions.loc[self.Predictions['y'] == 0].index)
performances.loc['all', 'N_1'] = len(self.Predictions.loc[self.Predictions['y'] == 1].index)
for outer_fold in self.outer_folds:
performances.loc[outer_fold, 'N'] = len(
self.Predictions.loc[self.Predictions['outer_fold'] == int(outer_fold)].index)
if self.target in self.targets_binary:
performances.loc[outer_fold, 'N_0'] = len(
self.Predictions.loc[
(self.Predictions['outer_fold'] == int(outer_fold)) & (self.Predictions['y'] == 0)].index)
performances.loc[outer_fold, 'N_1'] = len(
self.Predictions.loc[
(self.Predictions['outer_fold'] == int(outer_fold)) & (self.Predictions['y'] == 1)].index)
# initialize the dataframes
self.PERFORMANCES = {}
for mode in self.modes:
self.PERFORMANCES[mode] = performances.copy()
# Convert float to int for sample sizes and some metrics.
for col_name in self.PERFORMANCES[''].columns.values:
if any(metric in col_name for metric in self.metrics_displayed_in_int):
                # a recent version of pandas is needed for the nullable 'Int64' type; otherwise NaN cannot be
                # stored in an integer column
self.PERFORMANCES[''][col_name] = self.PERFORMANCES[''][col_name].astype('Int64')
def preprocessing(self):
self._preprocess_data_features_predictions_for_performances()
self._preprocess_predictions_for_performances()
self._initiate_empty_performances_df()
# Fill the columns for this model, outer_fold by outer_fold
def compute_performances(self):
# fill it outer_fold by outer_fold
for outer_fold in ['all'] + self.outer_folds:
print('Calculating the performances for the outer fold ' + outer_fold)
# Generate a subdataframe from the predictions table for each outerfold
if outer_fold == 'all':
predictions_fold = self.Predictions.copy()
else:
predictions_fold = self.Predictions.loc[self.Predictions['outer_fold'] == int(outer_fold), :]
# if no samples are available for this fold, fill columns with nans
if len(predictions_fold.index) == 0:
print('NO SAMPLES AVAILABLE FOR MODEL ' + self.version + ' IN OUTER_FOLD ' + outer_fold)
else:
# For binary classification, generate class prediction
if self.target in self.targets_binary:
predictions_fold_class = predictions_fold.copy()
predictions_fold_class['pred'] = predictions_fold_class['pred'].round()
else:
predictions_fold_class = None
# Fill the Performances dataframe metric by metric
for name_metric in self.names_metrics:
# print('Calculating the performance using the metric ' + name_metric)
if name_metric in self.metrics_needing_classpred:
predictions_metric = predictions_fold_class
else:
predictions_metric = predictions_fold
metric_function = self.dict_metrics_sklearn[name_metric]
self.PERFORMANCES[''].loc[outer_fold, name_metric] = metric_function(predictions_metric['y'],
predictions_metric['pred'])
self.PERFORMANCES['_sd'].loc[outer_fold, name_metric] = \
self._bootstrap(predictions_metric, metric_function)[1]
self.PERFORMANCES['_str'].loc[outer_fold, name_metric] = "{:.3f}".format(
self.PERFORMANCES[''].loc[outer_fold, name_metric]) + '+-' + "{:.3f}".format(
self.PERFORMANCES['_sd'].loc[outer_fold, name_metric])
# calculate the fold sd (variance between the metrics values obtained on the different folds)
folds_sd = self.PERFORMANCES[''].iloc[1:, :].std(axis=0)
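        # for the 'all' row, the string version is formatted as "metric+-folds_sd+-bootstrap_sd", i.e. the point
        # estimate, its spread across outer folds, and its bootstrapped standard deviation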
for name_metric in self.names_metrics:
self.PERFORMANCES['_str'].loc['all', name_metric] = "{:.3f}".format(
self.PERFORMANCES[''].loc['all', name_metric]) + '+-' + "{:.3f}".format(
folds_sd[name_metric]) + '+-' + "{:.3f}".format(self.PERFORMANCES['_sd'].loc['all', name_metric])
# print the performances
print('Performances for model ' + self.version + ': ')
print(self.PERFORMANCES['_str'])
def save_performances(self):
for mode in self.modes:
path_save = self.path_data + 'Performances_' + self.pred_type + '_' + self.version + '_' + self.fold + \
mode + '.csv'
self.PERFORMANCES[mode].to_csv(path_save, index=False)
class PerformancesMerge(Metrics):
"""
Merges the performances of the different models into a unified dataframe.
"""
def __init__(self, target=None, fold=None, pred_type=None, ensemble_models=None):
# Parameters
Metrics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
self.ensemble_models = self.convert_string_to_boolean(ensemble_models)
self.names_metrics = self.dict_metrics_names[self.dict_prediction_types[target]]
self.main_metric_name = self.dict_main_metrics_names[target]
# list the models that need to be merged
self.list_models = glob.glob(self.path_data + 'Performances_' + pred_type + '_' + target + '_*_' + fold +
'_str.csv')
# get rid of ensemble models
if self.ensemble_models:
self.list_models = [model for model in self.list_models if '*' in model]
else:
self.list_models = [model for model in self.list_models if '*' not in model]
self.Performances = None
self.Performances_alphabetical = None
self.Performances_ranked = None
def _initiate_empty_performances_summary_df(self):
# Define the columns of the Performances dataframe
# columns for sample sizes
names_sample_sizes = ['N']
if self.target in self.targets_binary:
names_sample_sizes.extend(['N_0', 'N_1'])
# columns for metrics
names_metrics = self.dict_metrics_names[self.dict_prediction_types[self.target]]
# for normal folds, keep track of metric and bootstrapped metric's sd
names_metrics_with_sd = []
for name_metric in names_metrics:
names_metrics_with_sd.extend([name_metric, name_metric + '_sd', name_metric + '_str'])
# for the 'all' fold, also keep track of the 'folds_sd' (metric's sd calculated using the folds' results)
names_metrics_with_folds_sd_and_sd = []
for name_metric in names_metrics:
names_metrics_with_folds_sd_and_sd.extend(
[name_metric, name_metric + '_folds_sd', name_metric + '_sd', name_metric + '_str'])
# merge all the columns together. First description of the model, then sample sizes and metrics for each fold
names_col_Performances = ['version'] + self.names_model_parameters
# special outer fold 'all'
names_col_Performances.extend(
['_'.join([name, 'all']) for name in names_sample_sizes + names_metrics_with_folds_sd_and_sd])
# other outer_folds
for outer_fold in self.outer_folds:
names_col_Performances.extend(
['_'.join([name, outer_fold]) for name in names_sample_sizes + names_metrics_with_sd])
# Generate the empty Performance table from the rows and columns.
Performances = np.empty((len(self.list_models), len(names_col_Performances),))
Performances.fill(np.nan)
Performances = pd.DataFrame(Performances)
Performances.columns = names_col_Performances
# Format the types of the columns
for colname in Performances.columns.values:
if (colname in self.names_model_parameters) | ('_str' in colname):
col_type = str
else:
col_type = float
Performances[colname] = Performances[colname].astype(col_type)
self.Performances = Performances
def merge_performances(self):
# define parameters
names_metrics = self.dict_metrics_names[self.dict_prediction_types[self.target]]
# initiate dataframe
self._initiate_empty_performances_summary_df()
# Fill the Performance table row by row
for i, model in enumerate(self.list_models):
# load the performances subdataframe
PERFORMANCES = {}
for mode in self.modes:
PERFORMANCES[mode] = pd.read_csv(model.replace('_str', mode))
PERFORMANCES[mode].set_index('outer_fold', drop=False, inplace=True)
# Fill the columns corresponding to the model's parameters
version = '_'.join(model.split('_')[2:-2])
parameters = self._version_to_parameters(version)
# fill the columns for model parameters
self.Performances['version'][i] = version
for parameter_name in self.names_model_parameters:
self.Performances[parameter_name][i] = parameters[parameter_name]
# Fill the columns for this model, outer_fold by outer_fold
for outer_fold in ['all'] + self.outer_folds:
# Generate a subdataframe from the predictions table for each outerfold
# Fill sample size columns
self.Performances['N_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N']
# For binary classification, calculate sample sizes for each class and generate class prediction
if self.target in self.targets_binary:
self.Performances['N_0_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N_0']
self.Performances['N_1_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N_1']
# Fill the Performances dataframe metric by metric
for name_metric in names_metrics:
for mode in self.modes:
self.Performances[name_metric + mode + '_' + outer_fold][i] = PERFORMANCES[mode].loc[
outer_fold, name_metric]
# calculate the fold sd (variance between the metrics values obtained on the different folds)
folds_sd = PERFORMANCES[''].iloc[1:, :].std(axis=0)
for name_metric in names_metrics:
                self.Performances[name_metric + '_folds_sd_all'][i] = folds_sd[name_metric]
# Convert float to int for sample sizes and some metrics.
for name_col in self.Performances.columns.values:
cond1 = name_col.startswith('N_')
cond2 = any(metric in name_col for metric in self.metrics_displayed_in_int)
cond3 = '_sd' not in name_col
cond4 = '_str' not in name_col
            if (cond1 | cond2) & cond3 & cond4:
                # a recent version of pandas is needed for the nullable 'Int64' type; otherwise NaN cannot be
                # stored in an integer column
                self.Performances[name_col] = self.Performances[name_col].astype('Int64')
# For ensemble models, merge the new performances with the previously computed performances
if self.ensemble_models:
Performances_withoutEnsembles = pd.read_csv(self.path_data + 'PERFORMANCES_tuned_alphabetical_' +
self.pred_type + '_' + self.target + '_' + self.fold + '.csv')
self.Performances = Performances_withoutEnsembles.append(self.Performances)
            # restore the column order (appending re-ordered the columns alphabetically for the 'val' fold)
self.Performances = self.Performances[Performances_withoutEnsembles.columns]
# Ranking, printing and saving
self.Performances_alphabetical = self.Performances.sort_values(by='version')
cols_to_print = ['version', self.main_metric_name + '_str_all']
print('Performances of the models ranked by models\'names:')
print(self.Performances_alphabetical[cols_to_print])
sort_by = self.dict_main_metrics_names[self.target] + '_all'
sort_ascending = self.main_metrics_modes[self.dict_main_metrics_names[self.target]] == 'min'
self.Performances_ranked = self.Performances.sort_values(by=sort_by, ascending=sort_ascending)
print('Performances of the models ranked by the performance on the main metric on all the samples:')
print(self.Performances_ranked[cols_to_print])
def save_performances(self):
name_extension = 'withEnsembles' if self.ensemble_models else 'withoutEnsembles'
path = self.path_data + 'PERFORMANCES_' + name_extension + '_alphabetical_' + self.pred_type + '_' + \
self.target + '_' + self.fold + '.csv'
self.Performances_alphabetical.to_csv(path, index=False)
self.Performances_ranked.to_csv(path.replace('_alphabetical_', '_ranked_'), index=False)
class PerformancesTuning(Metrics):
"""
For each model, selects the best hyperparameter combination.
"""
def __init__(self, target=None, pred_type=None):
Metrics.__init__(self)
self.target = target
self.pred_type = pred_type
self.PERFORMANCES = {}
self.PREDICTIONS = {}
self.Performances = None
self.models = None
self.folds = ['val', 'test']
def load_data(self):
for fold in self.folds:
path = self.path_data + 'PERFORMANCES_withoutEnsembles_ranked_' + self.pred_type + '_' + self.target + \
'_' + fold + '.csv'
self.PERFORMANCES[fold] = pd.read_csv(path).set_index('version', drop=False)
self.PERFORMANCES[fold]['organ'] = self.PERFORMANCES[fold]['organ'].astype(str)
self.PERFORMANCES[fold].index.name = 'columns_names'
self.PREDICTIONS[fold] = pd.read_csv(path.replace('PERFORMANCES', 'PREDICTIONS').replace('_ranked', ''))
def preprocess_data(self):
# Get list of distinct models without taking into account hyperparameters tuning
self.Performances = self.PERFORMANCES['val']
self.Performances['model'] = self.Performances['organ'] + '_' + self.Performances['view'] + '_' + \
self.Performances['transformation'] + '_' + self.Performances['architecture']
self.models = self.Performances['model'].unique()
def select_models(self):
main_metric_name = self.dict_main_metrics_names[self.target]
main_metric_mode = self.main_metrics_modes[main_metric_name]
Perf_col_name = main_metric_name + '_all'
for model in self.models:
Performances_model = self.Performances[self.Performances['model'] == model]
Performances_model.sort_values([Perf_col_name, 'n_fc_layers', 'n_fc_nodes', 'learning_rate', 'dropout_rate',
'weight_decay', 'data_augmentation_factor'],
ascending=[main_metric_mode == 'min', True, True, False, False, False,
False], inplace=True)
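            # the best version is taken as the row maximizing the main metric; this implicitly assumes the main
            # metric is in 'max' mode (e.g. R2 or ROC-AUC)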
best_version = Performances_model['version'][
Performances_model[Perf_col_name] == Performances_model[Perf_col_name].max()].values[0]
versions_to_drop = [version for version in Performances_model['version'].values if
not version == best_version]
# define columns from predictions to drop
cols_to_drop = ['pred_' + version for version in versions_to_drop] + ['outer_fold_' + version for version in
versions_to_drop]
for fold in self.folds:
self.PERFORMANCES[fold].drop(versions_to_drop, inplace=True)
self.PREDICTIONS[fold].drop(cols_to_drop, axis=1, inplace=True)
# drop 'model' column
self.Performances.drop(['model'], axis=1, inplace=True)
# Display results
for fold in self.folds:
print('The tuned ' + fold + ' performances are:')
print(self.PERFORMANCES[fold])
def save_data(self):
# Save the files
for fold in self.folds:
path_pred = self.path_data + 'PREDICTIONS_tuned_' + self.pred_type + '_' + self.target + '_' + fold + \
'.csv'
path_perf = self.path_data + 'PERFORMANCES_tuned_ranked_' + self.pred_type + '_' + self.target + '_' + \
fold + '.csv'
self.PREDICTIONS[fold].to_csv(path_pred, index=False)
self.PERFORMANCES[fold].to_csv(path_perf, index=False)
Performances_alphabetical = self.PERFORMANCES[fold].sort_values(by='version')
Performances_alphabetical.to_csv(path_perf.replace('ranked', 'alphabetical'), index=False)
# This class was coded by <NAME>.
class InnerCV:
"""
    Helper class to perform an inner cross-validation to tune the hyperparameters of models trained on scalar
    predictors.
"""
def __init__(self, models, inner_splits, n_iter):
self.inner_splits = inner_splits
self.n_iter = n_iter
if isinstance(models, str):
models = [models]
self.models = models
@staticmethod
def get_model(model_name, params):
if model_name == 'ElasticNet':
return ElasticNet(max_iter=2000, **params)
elif model_name == 'RandomForest':
return RandomForestRegressor(**params)
elif model_name == 'GradientBoosting':
return GradientBoostingRegressor(**params)
elif model_name == 'Xgboost':
return XGBRegressor(**params)
elif model_name == 'LightGbm':
return LGBMRegressor(**params)
elif model_name == 'NeuralNetwork':
return MLPRegressor(solver='adam',
activation='relu',
hidden_layer_sizes=(128, 64, 32),
batch_size=1000,
early_stopping=True, **params)
@staticmethod
def get_hyper_distribution(model_name):
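        # hp.randint(label, upper) draws an integer uniformly from [0, upper), so for instance
        # hp.randint('n_estimators', 300) + 150 yields between 150 and 449 estimators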
if model_name == 'ElasticNet':
return {
'alpha': hp.loguniform('alpha', low=np.log(0.01), high=np.log(10)),
'l1_ratio': hp.uniform('l1_ratio', low=0.01, high=0.99)
}
elif model_name == 'RandomForest':
return {
'n_estimators': hp.randint('n_estimators', upper=300) + 150,
'max_features': hp.choice('max_features', ['auto', 0.9, 0.8, 0.7, 0.6, 0.5, 0.4]),
'max_depth': hp.choice('max_depth', [None, 10, 8, 6])
}
elif model_name == 'GradientBoosting':
return {
'n_estimators': hp.randint('n_estimators', upper=300) + 150,
'max_features': hp.choice('max_features', ['auto', 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3]),
'learning_rate': hp.uniform('learning_rate', low=0.01, high=0.3),
'max_depth': hp.randint('max_depth', 10) + 5
}
elif model_name == 'Xgboost':
return {
'colsample_bytree': hp.uniform('colsample_bytree', low=0.2, high=0.7),
'gamma': hp.uniform('gamma', low=0.1, high=0.5),
'learning_rate': hp.uniform('learning_rate', low=0.02, high=0.2),
'max_depth': hp.randint('max_depth', 10) + 5,
'n_estimators': hp.randint('n_estimators', 300) + 150,
'subsample': hp.uniform('subsample', 0.2, 0.8)
}
elif model_name == 'LightGbm':
return {
'num_leaves': hp.randint('num_leaves', 40) + 5,
'min_child_samples': hp.randint('min_child_samples', 400) + 100,
'min_child_weight': hp.choice('min_child_weight', [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4]),
'subsample': hp.uniform('subsample', low=0.2, high=0.8),
'colsample_bytree': hp.uniform('colsample_bytree', low=0.4, high=0.6),
'reg_alpha': hp.choice('reg_alpha', [0, 1e-1, 1, 2, 5, 7, 10, 50, 100]),
'reg_lambda': hp.choice('reg_lambda', [0, 1e-1, 1, 5, 10, 20, 50, 100]),
'n_estimators': hp.randint('n_estimators', 300) + 150
}
elif model_name == 'NeuralNetwork':
return {
'learning_rate_init': hp.loguniform('learning_rate_init', low=np.log(5e-5), high=np.log(2e-2)),
'alpha': hp.uniform('alpha', low=1e-6, high=1e3)
}
def create_folds(self, X, y):
"""
X columns : eid + features except target
y columns : eid + target
"""
X_eid = X.drop_duplicates('eid')
y_eid = y.drop_duplicates('eid')
eids = X_eid.eid
# Kfold on the eid, then regroup all ids
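        # folds are defined at the participant (eid) level so that all instances of the same participant fall
        # in the same fold, avoiding leakage between the inner training and validation splits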
        # shuffle=False, so no random_state is needed (recent scikit-learn versions raise an error if one is set)
        inner_cv = KFold(n_splits=self.inner_splits, shuffle=False)
list_test_folds = [elem[1] for elem in inner_cv.split(X_eid, y_eid)]
list_test_folds_eid = [eids[elem].values for elem in list_test_folds]
list_test_folds_id = [X.index[X.eid.isin(list_test_folds_eid[elem])].values for elem in
range(len(list_test_folds_eid))]
return list_test_folds_id
def optimize_hyperparameters(self, X, y, scoring):
"""
input X : dataframe with features + eid
input y : dataframe with target + eid
"""
if 'instance' in X.columns:
X = X.drop(columns=['instance'])
if 'instance' in y.columns:
y = y.drop(columns=['instance'])
list_test_folds_id = self.create_folds(X, y)
X = X.drop(columns=['eid'])
y = y.drop(columns=['eid'])
# Create custom Splits
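        # PredefinedSplit expects, for each sample, the index of the test fold it belongs to; the array below
        # maps every row of X back to the fold that was defined at the eid level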
list_test_folds_id_index = [np.array([X.index.get_loc(elem) for elem in list_test_folds_id[fold_num]])
for fold_num in range(len(list_test_folds_id))]
test_folds = np.zeros(len(X), dtype='int')
for fold_count in range(len(list_test_folds_id)):
test_folds[list_test_folds_id_index[fold_count]] = fold_count
inner_cv = PredefinedSplit(test_fold=test_folds)
list_best_params = {}
list_best_score = {}
objective, model_name = None, None
for model_name in self.models:
def objective(hyperparameters):
estimator_ = self.get_model(model_name, hyperparameters)
pipeline = Pipeline([('scaler', StandardScaler()), ('estimator', estimator_)])
scores = cross_validate(pipeline, X.values, y, scoring=scoring, cv=inner_cv, n_jobs=self.inner_splits)
return {'status': STATUS_OK, 'loss': -scores['test_score'].mean(),
'attachments': {'split_test_scores_and_params': (scores['test_score'], hyperparameters)}}
space = self.get_hyper_distribution(model_name)
trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=self.n_iter, trials=trials)
best_params = space_eval(space, best)
list_best_params[model_name] = best_params
list_best_score[model_name] = - min(trials.losses())
# Recover best between all models :
best_model = max(list_best_score.keys(), key=(lambda k: list_best_score[k]))
best_model_hyp = list_best_params[best_model]
# Recreate best estim :
estim = self.get_model(best_model, best_model_hyp)
pipeline_best = Pipeline([('scaler', StandardScaler()), ('estimator', estim)])
pipeline_best.fit(X.values, y)
return pipeline_best
"""
Useful for EnsemblesPredictions. This function needs to be global to allow pool to pickle it.
"""
def compute_ensemble_folds(ensemble_inputs):
if len(ensemble_inputs[1]) < 100:
print('Small sample size:' + str(len(ensemble_inputs[1])))
n_inner_splits = 5
else:
n_inner_splits = 10
# Can use different models: models=['ElasticNet', 'LightGBM', 'NeuralNetwork']
cv = InnerCV(models=['ElasticNet'], inner_splits=n_inner_splits, n_iter=30)
model = cv.optimize_hyperparameters(ensemble_inputs[0], ensemble_inputs[1], scoring='r2')
return model
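# Hypothetical usage sketch (the variable names below are illustrative, not part of the pipeline):
#   X_val, y_val = ...  # dataframes holding the features and the target respectively, each with an 'eid' column
#   model = compute_ensemble_folds([X_val, y_val])
# The returned object is a fitted sklearn Pipeline (StandardScaler + the best estimator found by hyperopt).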
class EnsemblesPredictions(Metrics):
"""
Hierarchically builds ensemble models from the already existing predictions.
"""
def __init__(self, target=None, pred_type=None, regenerate_models=False):
# Parameters
Metrics.__init__(self)
self.target = target
self.pred_type = pred_type
self.folds = ['val', 'test']
self.regenerate_models = regenerate_models
self.ensembles_performance_cutoff_percent = 0.5
self.parameters = {'target': target, 'organ': '*', 'view': '*', 'transformation': '*', 'architecture': '*',
'n_fc_layers': '*', 'n_fc_nodes': '*', 'optimizer': '*', 'learning_rate': '*',
'weight_decay': '*', 'dropout_rate': '*', 'data_augmentation_factor': '*'}
self.version = self._parameters_to_version(self.parameters)
self.main_metric_name = self.dict_main_metrics_names[target]
self.init_perf = -np.Inf if self.main_metrics_modes[self.main_metric_name] == 'max' else np.Inf
path_perf = self.path_data + 'PERFORMANCES_tuned_ranked_' + pred_type + '_' + target + '_val.csv'
self.Performances = pd.read_csv(path_perf).set_index('version', drop=False)
self.Performances['organ'] = self.Performances['organ'].astype(str)
self.list_ensemble_levels = ['transformation', 'view', 'organ']
self.PREDICTIONS = {}
self.weights_by_category = None
self.weights_by_ensembles = None
self.N_ensemble_CV_split = 10
self.instancesS = {'instances': ['01', '1.5x', '23'], 'eids': ['*']}
self.instances_names_to_numbers = {'01': ['0', '1'], '1.5x': ['1.5', '1.51', '1.52', '1.53', '1.54'],
'23': ['2', '3'], '*': ['*']}
self.INSTANCES_DATASETS = {
'01': ['Eyes', 'Hearing', 'Lungs', 'Arterial', 'Musculoskeletal', 'Biochemistry', 'ImmuneSystem'],
'1.5x': ['PhysicalActivity'],
'23': ['Brain', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal'],
'*': ['Brain', 'Eyes', 'Hearing', 'Lungs', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity', 'ImmuneSystem']
}
    # Get rid of columns and rows for the versions for which all samples are NaNs
@staticmethod
def _drop_na_pred_versions(PREDS, Performances):
# Select the versions for which only NAs are available
pred_versions = [col for col in PREDS['val'].columns.values if 'pred_' in col]
to_drop = []
for pv in pred_versions:
for fold in PREDS.keys():
if PREDS[fold][pv].notna().sum() == 0:
to_drop.append(pv)
break
# Drop the corresponding columns from preds, and rows from performances
index_to_drop = [p.replace('pred_', '') for p in to_drop if '*' not in p]
for fold in PREDS.keys():
PREDS[fold].drop(to_drop, axis=1, inplace=True)
return Performances.drop(index_to_drop)
def load_data(self):
for fold in self.folds:
self.PREDICTIONS[fold] = pd.read_csv(
self.path_data + 'PREDICTIONS_tuned_' + self.pred_type + '_' + self.target + '_' + fold + '.csv')
def _build_single_ensemble(self, PREDICTIONS, version):
# Drop columns that are exclusively NaNs
all_nan = PREDICTIONS['val'].isna().all() | PREDICTIONS['test'].isna().all()
non_nan_cols = all_nan[~all_nan.values].index
for fold in self.folds:
PREDICTIONS[fold] = PREDICTIONS[fold][non_nan_cols]
Predictions = PREDICTIONS['val']
# Select the columns for the model
ensemble_preds_cols = [col for col in Predictions.columns.values if
bool(re.compile('pred_' + version).match(col))]
# If only one model in the ensemble, just copy the column. Otherwise build the ensemble model
if len(ensemble_preds_cols) == 1:
for fold in self.folds:
PREDICTIONS[fold]['pred_' + version] = PREDICTIONS[fold][ensemble_preds_cols[0]]
else:
# Initiate the dictionaries
PREDICTIONS_OUTERFOLDS = {}
ENSEMBLE_INPUTS = {}
for outer_fold in self.outer_folds:
# take the subset of the rows that correspond to the outer_fold
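                # for a given outer fold, the ensemble weights are fitted on that fold's validation rows, the
                # next fold (modulo the number of outer folds) serves as test, and the remaining folds are
                # labelled as training rows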
PREDICTIONS_OUTERFOLDS[outer_fold] = {}
XS_outer_fold = {}
YS_outer_fold = {}
dict_fold_to_outer_folds = {
'val': [float(outer_fold)],
'test': [(float(outer_fold) + 1) % self.n_CV_outer_folds],
'train': [float(of) for of in self.outer_folds
if float(of) not in [float(outer_fold), (float(outer_fold) + 1) % self.n_CV_outer_folds]]
}
for fold in self.folds:
PREDICTIONS_OUTERFOLDS[outer_fold][fold] = \
PREDICTIONS[fold][PREDICTIONS[fold]['outer_fold'].isin(dict_fold_to_outer_folds[fold])]
PREDICTIONS_OUTERFOLDS[outer_fold][fold] = PREDICTIONS_OUTERFOLDS[outer_fold][fold][
['id', 'eid', 'instance', self.target] + ensemble_preds_cols].dropna()
X = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'eid', 'instance'] + ensemble_preds_cols]
X.set_index('id', inplace=True)
XS_outer_fold[fold] = X
y = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'eid', self.target]]
y.set_index('id', inplace=True)
YS_outer_fold[fold] = y
ENSEMBLE_INPUTS[outer_fold] = [XS_outer_fold['val'], YS_outer_fold['val']]
# Build ensemble model using ElasticNet and/or LightGBM, Neural Network.
PREDICTIONS_ENSEMBLE = {}
pool = Pool(self.N_ensemble_CV_split)
MODELS = pool.map(compute_ensemble_folds, list(ENSEMBLE_INPUTS.values()))
pool.close()
pool.join()
# Concatenate all outer folds
for outer_fold in self.outer_folds:
for fold in self.folds:
X = PREDICTIONS_OUTERFOLDS[outer_fold][fold][ensemble_preds_cols]
PREDICTIONS_OUTERFOLDS[outer_fold][fold]['pred_' + version] = MODELS[int(outer_fold)].predict(X)
PREDICTIONS_OUTERFOLDS[outer_fold][fold]['outer_fold'] = float(outer_fold)
df_outer_fold = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'outer_fold',
'pred_' + version]]
# Initiate, or append if some previous outerfolds have already been concatenated
if fold not in PREDICTIONS_ENSEMBLE.keys():
PREDICTIONS_ENSEMBLE[fold] = df_outer_fold
else:
PREDICTIONS_ENSEMBLE[fold] = PREDICTIONS_ENSEMBLE[fold].append(df_outer_fold)
# Add the ensemble predictions to the dataframe
for fold in self.folds:
if fold == 'train':
PREDICTIONS[fold] = PREDICTIONS[fold].merge(PREDICTIONS_ENSEMBLE[fold], how='outer',
on=['id', 'outer_fold'])
else:
PREDICTIONS_ENSEMBLE[fold].drop('outer_fold', axis=1, inplace=True)
PREDICTIONS[fold] = PREDICTIONS[fold].merge(PREDICTIONS_ENSEMBLE[fold], how='outer', on=['id'])
def _build_single_ensemble_wrapper(self, version, ensemble_level):
print('Building the ensemble model ' + version)
pred_version = 'pred_' + version
# Evaluate if the ensemble model should be built
# 1 - separately on instance 0-1, 1.5 and 2-3 (for ensemble at the top level, since overlap between models is 0)
# 2 - piece by piece on each outer_fold
# 1-Compute instances 0-1, 1.5 and 2-3 separately
if ensemble_level == 'organ':
for fold in self.folds:
self.PREDICTIONS[fold][pred_version] = np.nan
# Add an ensemble for each instances (01, 1.5x, and 23)
if self.pred_type == 'instances':
for instances_names in self.instancesS[self.pred_type]:
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
self.PREDICTIONS[fold][pv] = np.nan
for instances_names in self.instancesS[self.pred_type]:
print('Building final ensemble model for samples in the instances: ' + instances_names)
# Take subset of rows and columns
instances = self.instances_names_to_numbers[instances_names]
instances_datasets = self.INSTANCES_DATASETS[instances_names]
versions = \
[col.replace('pred_', '') for col in self.PREDICTIONS['val'].columns if 'pred_' in col]
instances_versions = [version for version in versions
if any(dataset in version for dataset in instances_datasets)]
cols_to_keep = self.id_vars + self.demographic_vars + \
['pred_' + version for version in instances_versions]
PREDICTIONS = {}
for fold in self.folds:
PREDICTIONS[fold] = self.PREDICTIONS[fold][self.PREDICTIONS[fold].instance.isin(instances)]
PREDICTIONS[fold] = PREDICTIONS[fold][cols_to_keep]
self._build_single_ensemble(PREDICTIONS, version)
# Print a quick performance estimation for each instance(s)
df_model = PREDICTIONS['test'][[self.target, 'pred_' + version]].dropna()
print(instances_names)
print(self.main_metric_name + ' for instance(s) ' + instances_names + ': ' +
str(r2_score(df_model[self.target], df_model['pred_' + version])))
print('The sample size is ' + str(len(df_model.index)) + '.')
                # Add the predictions to the dataframe, chunk by chunk, one instance group at a time
for fold in self.folds:
self.PREDICTIONS[fold][pred_version][self.PREDICTIONS[fold].instance.isin(instances)] = \
PREDICTIONS[fold][pred_version].values
# Add an ensemble for the instance(s) only
if self.pred_type == 'instances':
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
self.PREDICTIONS[fold][pv][self.PREDICTIONS[fold].instance.isin(instances)] = \
PREDICTIONS[fold][pred_version].values
# Add three extra ensemble models for eids, to allow larger sample sizes for GWAS purposes
if self.pred_type == 'eids':
for instances_names in ['01', '1.5x', '23']:
print('Building final sub-ensemble model for samples in the instances: ' + instances_names)
# Keep only relevant columns
instances_datasets = self.INSTANCES_DATASETS[instances_names]
versions = \
[col.replace('pred_', '') for col in self.PREDICTIONS['val'].columns if 'pred_' in col]
instances_versions = [version for version in versions
if any(dataset in version for dataset in instances_datasets)]
cols_to_keep = self.id_vars + self.demographic_vars + \
['pred_' + version for version in instances_versions]
PREDICTIONS = {}
for fold in self.folds:
PREDICTIONS[fold] = self.PREDICTIONS[fold].copy()
PREDICTIONS[fold] = PREDICTIONS[fold][cols_to_keep]
self._build_single_ensemble(PREDICTIONS, version)
# Print a quick performance estimation for each instance(s)
df_model = PREDICTIONS['test'][[self.target, 'pred_' + version]].dropna()
print(instances_names)
print(self.main_metric_name + ' for instance(s) ' + instances_names + ': ' +
str(r2_score(df_model[self.target], df_model['pred_' + version])))
print('The sample size is ' + str(len(df_model.index)) + '.')
# Add the predictions to the dataframe
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
for fold in self.folds:
self.PREDICTIONS[fold][pv] = PREDICTIONS[fold][pred_version].values
# 2-Compute fold by fold
else:
self._build_single_ensemble(self.PREDICTIONS, version)
# build and save a dataset for this specific ensemble model
for fold in self.folds:
df_single_ensemble = self.PREDICTIONS[fold][['id', 'outer_fold', pred_version]]
df_single_ensemble.rename(columns={pred_version: 'pred'}, inplace=True)
df_single_ensemble.dropna(inplace=True, subset=['pred'])
df_single_ensemble.to_csv(self.path_data + 'Predictions_' + self.pred_type + '_' + version + '_' + fold +
'.csv', index=False)
# Add extra ensembles at organ level
if ensemble_level == 'organ':
for instances_names in ['01', '1.5x', '23']:
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
version_instances = version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
df_single_ensemble = self.PREDICTIONS[fold][['id', 'outer_fold', pv]]
df_single_ensemble.rename(columns={pv: 'pred'}, inplace=True)
df_single_ensemble.dropna(inplace=True, subset=['pred'])
df_single_ensemble.to_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_instances + '_' + fold + '.csv', index=False)
def _recursive_ensemble_builder(self, Performances_grandparent, parameters_parent, version_parent,
list_ensemble_levels_parent):
# Compute the ensemble models for the children first, so that they can be used for the parent model
Performances_parent = Performances_grandparent[
Performances_grandparent['version'].isin(
fnmatch.filter(Performances_grandparent['version'], version_parent))]
# if the last ensemble level has not been reached, go down one level and create a branch for each child.
# Otherwise the leaf has been reached
if len(list_ensemble_levels_parent) > 0:
list_ensemble_levels_child = list_ensemble_levels_parent.copy()
ensemble_level = list_ensemble_levels_child.pop()
list_children = Performances_parent[ensemble_level].unique()
for child in list_children:
parameters_child = parameters_parent.copy()
parameters_child[ensemble_level] = child
version_child = self._parameters_to_version(parameters_child)
# recursive call to the function
self._recursive_ensemble_builder(Performances_parent, parameters_child, version_child,
list_ensemble_levels_child)
else:
ensemble_level = None
# compute the ensemble model for the parent
# Check if ensemble model has already been computed. If it has, load the predictions. If it has not, compute it.
if not self.regenerate_models and \
os.path.exists(self.path_data + 'Predictions_' + self.pred_type + '_' + version_parent + '_test.csv'):
print('The model ' + version_parent + ' has already been computed. Loading it...')
for fold in self.folds:
df_single_ensemble = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_parent + '_' + fold + '.csv')
df_single_ensemble.rename(columns={'pred': 'pred_' + version_parent}, inplace=True)
# Add the ensemble predictions to the dataframe
if fold == 'train':
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id', 'outer_fold'])
else:
df_single_ensemble.drop(columns=['outer_fold'], inplace=True)
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer', on=['id'])
# Add the extra ensemble models at the 'organ' level
if ensemble_level == 'organ':
if self.pred_type == 'instances':
instances = self.instancesS[self.pred_type]
else:
instances = ['01', '23']
for instances_names in instances:
pv = 'pred_' + version_parent.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version_parent.split('_')[2:])
version_instances = version_parent.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version_parent.split('_')[2:])
df_single_ensemble = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_instances + '_' + fold + '.csv')
df_single_ensemble.rename(columns={'pred': pv}, inplace=True)
if fold == 'train':
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id', 'outer_fold'])
else:
df_single_ensemble.drop(columns=['outer_fold'], inplace=True)
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id'])
else:
self._build_single_ensemble_wrapper(version_parent, ensemble_level)
# Print a quick performance estimation
df_model = self.PREDICTIONS['test'][[self.target, 'pred_' + version_parent]].dropna()
print(self.main_metric_name + ': ' + str(r2_score(df_model[self.target], df_model['pred_' + version_parent])))
print('The sample size is ' + str(len(df_model.index)) + '.')
def generate_ensemble_predictions(self):
self._recursive_ensemble_builder(self.Performances, self.parameters, self.version, self.list_ensemble_levels)
# Reorder the columns alphabetically
for fold in self.folds:
pred_versions = [col for col in self.PREDICTIONS[fold].columns if 'pred_' in col]
pred_versions.sort()
self.PREDICTIONS[fold] = self.PREDICTIONS[fold][self.id_vars + self.demographic_vars + pred_versions]
# Displaying the R2s
for fold in self.folds:
versions = [col.replace('pred_', '') for col in self.PREDICTIONS[fold].columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.PREDICTIONS[fold][[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print(fold + ' R2s for each model: ')
print(R2S)
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'PREDICTIONS_withEnsembles_' + self.pred_type + '_' +
self.target + '_' + fold + '.csv', index=False)
class ResidualsGenerate(Basics):
"""
    Computes accelerated aging phenotypes (residuals, corrected for the residuals' bias with respect to age).
"""
def __init__(self, target=None, fold=None, pred_type=None, debug_mode=False):
# Parameters
Basics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
self.debug_mode = debug_mode
self.Residuals = pd.read_csv(self.path_data + 'PREDICTIONS_withEnsembles_' + pred_type + '_' + target + '_' +
fold + '.csv')
self.list_models = [col_name.replace('pred_', '') for col_name in self.Residuals.columns.values
if 'pred_' in col_name]
def generate_residuals(self):
list_models = [col_name.replace('pred_', '') for col_name in self.Residuals.columns.values
if 'pred_' in col_name]
for model in list_models:
print('Generating residuals for model ' + model)
df_model = self.Residuals[['Age', 'pred_' + model]]
no_na_indices = [not b for b in df_model['pred_' + model].isna()]
df_model.dropna(inplace=True)
if (len(df_model.index)) > 0:
age = df_model.loc[:, ['Age']]
res = df_model['Age'] - df_model['pred_' + model]
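                # regress the residuals on age and subtract the fitted linear trend so that the corrected
                # residuals (accelerated aging phenotypes) are uncorrelated with chronological age in this sample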
regr = LinearRegression()
regr.fit(age, res)
res_correction = regr.predict(age)
res_corrected = res - res_correction
self.Residuals.loc[no_na_indices, 'pred_' + model] = res_corrected
# debug plot
if self.debug_mode:
print('Bias for the residuals ' + model, regr.coef_)
plt.scatter(age, res)
plt.scatter(age, res_corrected)
regr2 = LinearRegression()
regr2.fit(age, res_corrected)
print('Coefficients after: \n', regr2.coef_)
self.Residuals.rename(columns=lambda x: x.replace('pred_', 'res_'), inplace=True)
def save_residuals(self):
self.Residuals.to_csv(self.path_data + 'RESIDUALS_' + self.pred_type + '_' + self.target + '_' + self.fold +
'.csv', index=False)
class ResidualsCorrelations(Basics):
"""
Computes the phenotypic correlation between aging dimensions.
"""
def __init__(self, target=None, fold=None, pred_type=None, debug_mode=False):
Basics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
self.debug_mode = debug_mode
if debug_mode:
self.n_bootstrap_iterations_correlations = 10
else:
self.n_bootstrap_iterations_correlations = 1000
self.Residuals = None
self.CORRELATIONS = {}
self.Correlation_sample_sizes = None
def preprocessing(self):
# load data
Residuals = pd.read_csv(self.path_data + 'RESIDUALS_' + self.pred_type + '_' + self.target + '_' + self.fold +
'.csv')
# Format the dataframe
Residuals_only = Residuals[[col_name for col_name in Residuals.columns.values if 'res_' in col_name]]
Residuals_only.rename(columns=lambda x: x.replace('res_' + self.target + '_', ''), inplace=True)
# Reorder the columns to make the correlation matrix more readable
# Need to temporarily rename '?' because its ranking differs from the '*' and ',' characters
Residuals_only.columns = [col_name.replace('?', ',placeholder') for col_name in Residuals_only.columns.values]
Residuals_only = Residuals_only.reindex(sorted(Residuals_only.columns), axis=1)
Residuals_only.columns = [col_name.replace(',placeholder', '?') for col_name in Residuals_only.columns.values]
self.Residuals = Residuals_only
def _bootstrap_correlations(self):
names = self.Residuals.columns.values
results = []
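# Each bootstrap iteration resamples the rows of the residuals dataframe with replacement and recomputes
# the full correlation matrix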
for i in range(self.n_bootstrap_iterations_correlations):
if (i + 1) % 100 == 0:
print('Bootstrap iteration ' + str(i + 1) + ' out of ' + str(self.n_bootstrap_iterations_correlations))
data_i = resample(self.Residuals, replace=True, n_samples=len(self.Residuals.index))
results.append(np.array(data_i.corr()))
results = np.array(results)
RESULTS = {}
for op in ['mean', 'std']:
results_op = pd.DataFrame(getattr(np, op)(results, axis=0))
results_op.index = names
results_op.columns = names
RESULTS[op] = results_op
self.CORRELATIONS['_sd'] = RESULTS['std']
def generate_correlations(self):
# Generate the correlation matrix
self.CORRELATIONS[''] = self.Residuals.corr()
# Generate the std by bootstrapping
self._bootstrap_correlations()
# Merge both as a dataframe of strings
self.CORRELATIONS['_str'] = self.CORRELATIONS[''].round(3).applymap(str) \
+ '+-' + self.CORRELATIONS['_sd'].round(3).applymap(str)
# Print correlations
print(self.CORRELATIONS[''])
# Generate correlation sample sizes
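# Convert the residuals to a 0/1 availability indicator so that the matrix product counts, for each pair of
# models, the number of samples for which both residuals are available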
self.Residuals[~self.Residuals.isna()] = 1
self.Residuals[self.Residuals.isna()] = 0
self.Correlation_sample_sizes = self.Residuals.transpose() @ self.Residuals
def save_correlations(self):
self.Correlation_sample_sizes.to_csv(self.path_data + 'ResidualsCorrelations_samplesizes_' + self.pred_type +
'_' + self.target + '_' + self.fold + '.csv', index=True)
for mode in self.modes:
self.CORRELATIONS[mode].to_csv(self.path_data + 'ResidualsCorrelations' + mode + '_' + self.pred_type +
'_' + self.target + '_' + self.fold + '.csv', index=True)
class PerformancesSurvival(Metrics):
"""
Computes the performances in terms of survival prediction using biological age phenotypes as survival predictors.
"""
def __init__(self, target=None, fold=None, pred_type=None, debug_mode=None):
Metrics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
if debug_mode:
self.n_bootstrap_iterations = 3
else:
self.n_bootstrap_iterations = 1000
self.PERFORMANCES = None
self.Survival = None
self.SURV = None
def _bootstrap_c_index(self, data):
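# Bootstrap the concordance index: resample with replacement and recompute the C-Index, skipping resamples
# in which only one death status is present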
results = []
for i in range(self.n_bootstrap_iterations):
data_i = resample(data, replace=True, n_samples=len(data.index))
if len(data_i['Death'].unique()) == 2:
# Mirror the full-sample estimate: follow-up time as the duration, minus the biological age as the score
results.append(concordance_index(data_i['FollowUpTime'], -(data_i['Age'] - data_i['pred']),
data_i['Death']))
'''
To debug if this part fails again
try:
results.append(concordance_index(data_i['Age'], -data_i['pred'], data_i['Death']))
except:
print('WEIRD, should not happen! Printing the df')
print(data_i)
self.data_i_debug = data_i
break
'''
if len(results) > 0:
results_mean = np.mean(results)
results_std = np.std(results)
else:
results_mean = np.nan
results_std = np.nan
return results_mean, results_std
def load_data(self):
# Load and preprocess PERFORMANCES
self.PERFORMANCES = pd.read_csv(self.path_data + 'PERFORMANCES_withEnsembles_alphabetical_' +
self.pred_type + '_' + self.target + '_' + self.fold + '.csv')
self.PERFORMANCES.set_index('version', drop=False, inplace=True)
self.PERFORMANCES.index.name = 'index'
for inner_fold in ['all'] + [str(i) for i in range(10)]:
for metric in ['C-Index', 'C-Index-difference']:
for mode in self.modes:
self.PERFORMANCES[metric + mode + '_' + inner_fold] = np.nan
Residuals = pd.read_csv(
self.path_data + 'RESIDUALS_' + self.pred_type + '_' + self.target + '_' + self.fold + '.csv')
Survival = pd.read_csv(self.path_data + 'data_survival.csv')
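# Attach the follow-up time and death status to the residuals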
self.Survival = pd.merge(Survival[['id', 'FollowUpTime', 'Death']], Residuals, on='id')
data_folds = pd.read_csv(self.path_data + 'data-features_eids.csv', usecols=['eid', 'outer_fold'])
self.SURV = {}
for i in range(10):
self.SURV[i] = \
self.Survival[self.Survival['eid'].isin(data_folds['eid'][data_folds['outer_fold'] == i].values)]
def compute_c_index_and_save_data(self):
models = [col.replace('res_' + self.target, self.target) for col in self.Survival.columns if 'res_' in col]
for k, model in enumerate(models):
if k % 30 == 0:
print('Computing CI for the ' + str(k) + 'th model out of ' + str(len(models)) + ' models.')
# Load Performances dataframes
PERFS = {}
for mode in self.modes:
PERFS[mode] = pd.read_csv('../data/Performances_' + self.pred_type + '_' + model + '_' + self.fold +
mode + '.csv')
PERFS[mode].set_index('outer_fold', drop=False, inplace=True)
PERFS[mode]['C-Index'] = np.nan
PERFS[mode]['C-Index-difference'] = np.nan
df_model = self.Survival[['FollowUpTime', 'Death', 'Age', 'res_' + model]].dropna()
df_model.rename(columns={'res_' + model: 'pred'}, inplace=True)
# Compute CI over all samples
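# The survival score is minus the biological age (Age minus the residual), so older biological ages
# correspond to shorter predicted survival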
if len(df_model['Death'].unique()) == 2:
ci_model = concordance_index(df_model['FollowUpTime'], -(df_model['Age'] - df_model['pred']),
df_model['Death'])
ci_age = concordance_index(df_model['FollowUpTime'], -df_model['Age'], df_model['Death'])
ci_diff = ci_model - ci_age
PERFS[''].loc['all', 'C-Index'] = ci_model
PERFS[''].loc['all', 'C-Index-difference'] = ci_diff
self.PERFORMANCES.loc[model, 'C-Index_all'] = ci_model
self.PERFORMANCES.loc[model, 'C-Index-difference_all'] = ci_diff
_, ci_sd = self._bootstrap_c_index(df_model)
PERFS['_sd'].loc['all', 'C-Index'] = ci_sd
PERFS['_sd'].loc['all', 'C-Index-difference'] = ci_sd
self.PERFORMANCES.loc[model, 'C-Index_sd_all'] = ci_sd
self.PERFORMANCES.loc[model, 'C-Index-difference_sd_all'] = ci_sd
# Compute CI over each fold
for i in range(10):
df_model_i = self.SURV[i][['FollowUpTime', 'Death', 'Age', 'res_' + model]].dropna()
df_model_i.rename(columns={'res_' + model: 'pred'}, inplace=True)
if len(df_model_i['Death'].unique()) == 2:
ci_model_i = concordance_index(df_model_i['FollowUpTime'],
-(df_model_i['Age'] - df_model_i['pred']),
df_model_i['Death'])
ci_age_i = concordance_index(df_model_i['FollowUpTime'], -df_model_i['Age'], df_model_i['Death'])
ci_diff_i = ci_model_i - ci_age_i
PERFS[''].loc[str(i), 'C-Index'] = ci_model_i
PERFS[''].loc[str(i), 'C-Index-difference'] = ci_diff_i
self.PERFORMANCES.loc[model, 'C-Index_' + str(i)] = ci_model_i
self.PERFORMANCES.loc[model, 'C-Index-difference_' + str(i)] = ci_diff_i
_, ci_i_sd = self._bootstrap_c_index(df_model_i)
PERFS['_sd'].loc[str(i), 'C-Index'] = ci_i_sd
PERFS['_sd'].loc[str(i), 'C-Index-difference'] = ci_i_sd
self.PERFORMANCES.loc[model, 'C-Index_sd_' + str(i)] = ci_i_sd
self.PERFORMANCES.loc[model, 'C-Index-difference_sd_' + str(i)] = ci_i_sd
# Compute sd using all folds
ci_str = round(PERFS[''][['C-Index', 'C-Index-difference']], 3).astype(str) + '+-' + \
round(PERFS['_sd'][['C-Index', 'C-Index-difference']], 3).astype(str)
PERFS['_str'][['C-Index', 'C-Index-difference']] = ci_str
for col in ['C-Index', 'C-Index-difference']:
cols = [col + '_str_' + str(i) for i in range(10)]
# Fill model's performance matrix
ci_std_lst = PERFS['_str'].loc['all', col].split('+-')
ci_std_lst.insert(1, str(round(PERFS[''][col].iloc[1:].std(), 3)))
ci_std_str = '+-'.join(ci_std_lst)
PERFS['_str'].loc['all', col] = ci_std_str
# Fill global performances matrix
self.PERFORMANCES.loc[model, cols] = ci_str[col].values[1:]
self.PERFORMANCES.loc[model, col + '_str_all'] = ci_std_str
# Save new performances
for mode in self.modes:
PERFS[mode].to_csv('../data/Performances_' + self.pred_type + '_withCI_' + model + '_' + self.fold +
mode + '.csv')
# Ranking, printing and saving
# Sort by alphabetical order
Performances_alphabetical = self.PERFORMANCES.sort_values(by='version')
Performances_alphabetical.to_csv(self.path_data + 'PERFORMANCES_withEnsembles_withCI_alphabetical_' +
self.pred_type + '_' + self.target + '_' + self.fold + '.csv', index=False)
# Sort by C-Index difference, to print
cols_to_print = ['version', 'C-Index-difference_str_all']
Performances_ranked = self.PERFORMANCES.sort_values(by='C-Index-difference_all', ascending=False)
print('Performances of the models ranked by C-Index difference with C-Index based on age only,'
' on all the samples:')
print(Performances_ranked[cols_to_print])
# Sort by main metric, to save
sort_by = self.dict_main_metrics_names[self.target] + '_all'
sort_ascending = self.main_metrics_modes[self.dict_main_metrics_names[self.target]] == 'min'
Performances_ranked = self.PERFORMANCES.sort_values(by=sort_by, ascending=sort_ascending)
Performances_ranked.to_csv(self.path_data + 'PERFORMANCES_withEnsembles_withCI_withEnsembles_ranked_' +
self.pred_type + '_' + self.target + '_' + self.fold + '.csv', index=False)
# Save the performances without the ensemble models
models_nonensembles = [idx for idx in Performances_alphabetical.index if '*' not in idx]
path_save = self.path_data + 'PERFORMANCES_withoutEnsembles_withCI_alphabetical_' + self.pred_type + '_' + \
self.target + '_' + self.fold + '.csv'
Performances_alphabetical.loc[models_nonensembles, :].to_csv(path_save, index=False)
Performances_ranked.loc[models_nonensembles, :].to_csv(path_save.replace('alphabetical', 'ranked'), index=False)
def print_key_results(self):
# Helper function: two-sided z-test based on the bootstrapped standard deviation of the C-Index difference
def compute_p_value(row):
sd = float(row['C-Index-difference_str_all'].split('+-')[1])
z = np.abs(row['C-Index-difference_all']) / sd
pv = norm.sf(abs(z)) * 2
return pv
# Preprocess the data
Performances = pd.read_csv(
self.path_data + 'PERFORMANCES_withEnsembles_withCI_alphabetical_' + self.pred_type + '_' +
self.target + '_' + self.fold + '.csv')
Performances.set_index('version', drop=False, inplace=True)
Perfs_CI = Performances[['version', 'C-Index_all', 'C-Index-difference_all',
'C-Index-difference_str_all']].sort_values(by='C-Index-difference_all')
Perfs_CI['C-Index_CA'] = Perfs_CI['C-Index_all'] - Perfs_CI['C-Index-difference_all']
Perfs_CI['p-value'] = Perfs_CI.apply(compute_p_value, axis=1)
# Select only models for which difference between biological age's CI and chronological age's CI is significant
Perfs_CI_significant = Perfs_CI[Perfs_CI['p-value'] < 0.05]
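# The 'FDR' filter below is effectively a Bonferroni-style correction: each p-value is multiplied by the
# number of tests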
Perfs_CI_significant_FDR = Perfs_CI[Perfs_CI['p-value'] * len(Perfs_CI.index) < 0.05]
# Take the subset corresponding to the 11 main dimensions
main_dims = ['Brain', 'Eyes', 'Hearing', 'Lungs', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity', 'Biochemistry', 'ImmuneSystem']
main_rows = ['Age_' + dim + '_*' * 10 for dim in main_dims]
Perfs_CI_main = Perfs_CI.loc[main_rows, :]
Perfs_CI_main.sort_values(by='C-Index-difference_all', inplace=True)
# Select only models for which difference between biological age's CI and chronological age's CI is significant
Perfs_CI_main_significant = Perfs_CI_main[Perfs_CI_main['p-value'] < 0.05]
Perfs_CI_main_significant_FDR = Perfs_CI_main[Perfs_CI_main['p-value'] * len(Perfs_CI_main.index) < 0.05]
# Compute the statistics to compare biological ages and chronological age on all the dimensions
CI_diff_mean = Perfs_CI['C-Index-difference_all'].mean()
CI_diff_std = Perfs_CI['C-Index-difference_all'].std()
_t_stat_all, pv_all = ttest_rel(Perfs_CI['C-Index_all'], Perfs_CI['C-Index_CA'])
# Number of dimensions outperforming and underperforming compared to chronological age
n_CI_diff_positives = (Perfs_CI['C-Index-difference_all'] > 0).sum()
n_CI_diff_negatives = (Perfs_CI['C-Index-difference_all'] < 0).sum()
n_CI_diff_positives_significant = (Perfs_CI_significant['C-Index-difference_all'] > 0).sum()
n_CI_diff_negatives_significant = (Perfs_CI_significant['C-Index-difference_all'] < 0).sum()
n_CI_diff_positives_significant_FDR = (Perfs_CI_significant_FDR['C-Index-difference_all'] > 0).sum()
n_CI_diff_negatives_significant_FDR = (Perfs_CI_significant_FDR['C-Index-difference_all'] < 0).sum()
# print results
print('The mean CI difference over the ' + str(len(Perfs_CI.index)) + ' biological ages = ' +
str(round(CI_diff_mean, 3)) + '; standard deviation = ' + str(round(CI_diff_std, 3)) +
'; paired t-test p-value = ' + str(pv_all))
print('Out of the ' + str(len(Perfs_CI.index)) + ' dimensions, ' + str(n_CI_diff_positives) +
' dimensions outperform CA as survival predictors, and ' + str(n_CI_diff_negatives) +
' dimensions underperform.')
# Compute the statistics to compare biological ages and chronological age on the 11 main dimensions
CI_diff_main_mean = Perfs_CI_main['C-Index-difference_all'].mean()
CI_diff_main_std = Perfs_CI_main['C-Index-difference_all'].std()
_t_stat_main, pv_main = ttest_rel(Perfs_CI_main['C-Index_all'], Perfs_CI_main['C-Index_CA'])
# Number of dimensions outperforming and underperforming compared to chronological age
n_CI_diff_main_positives = (Perfs_CI_main['C-Index-difference_all'] > 0).sum()
n_CI_diff_main_negatives = (Perfs_CI_main['C-Index-difference_all'] < 0).sum()
n_CI_diff_main_positives_significant = (Perfs_CI_main_significant['C-Index-difference_all'] > 0).sum()
n_CI_diff_main_negatives_significant = (Perfs_CI_main_significant['C-Index-difference_all'] < 0).sum()
n_CI_diff_main_positives_significant_FDR = (Perfs_CI_main_significant_FDR['C-Index-difference_all'] > 0).sum()
n_CI_diff_main_negatives_significant_FDR = (Perfs_CI_main_significant_FDR['C-Index-difference_all'] < 0).sum()
# print results
print('The mean CI difference over the ' + str(len(Perfs_CI_main.index)) + ' biological ages = ' +
str(round(CI_diff_main_mean, 3)) + '; standard deviation = ' + str(round(CI_diff_main_std, 3)) +
'; paired t-test p-value = ' + str(pv_main))
print('Out of the ' + str(len(Perfs_CI_main.index)) + ' main biological dimensions, ' + str(
n_CI_diff_main_positives) +
' dimensions outperform CA as survival predictors, and ' + str(n_CI_diff_main_negatives) +
' dimensions underperform.')
# Print the summary table for the main dimensions
print(Perfs_CI_main[['version', 'C-Index-difference_all',
'C-Index-difference_str_all', 'C-Index_all', 'C-Index_CA']].sort_values(
by='C-Index-difference_all'))
row_names = ['All', 'significant', 'FDR_significant']
col_names = ['All', '+', '-']
n_models = pd.DataFrame(np.empty((len(row_names), len(col_names),)))
n_models.index = row_names
n_models.columns = col_names
N_MODELS = {'All_dims': n_models.copy(), 'Main_dims': n_models.copy()}
best_models = n_models.drop(columns=['All'])
BEST_MODELS = {'All_dims': best_models.copy(), 'Main_dims': best_models.copy()}
BEST_CI_DIFFS = {'All_dims': best_models.copy(), 'Main_dims': best_models.copy()}
N_MODELS['All_dims'].loc[:, '+'] = \
[n_CI_diff_positives, n_CI_diff_positives_significant, n_CI_diff_positives_significant_FDR]
BEST_MODELS['All_dims'].loc[:, '+'] = [Perfs_CI['version'][len(Perfs_CI.index) - 1],
Perfs_CI_significant['version'][len(Perfs_CI_significant.index) - 1],
Perfs_CI_significant_FDR['version'][
len(Perfs_CI_significant_FDR.index) - 1]]
BEST_CI_DIFFS['All_dims'].loc[:, '+'] = \
[Perfs_CI['C-Index-difference_str_all'][len(Perfs_CI.index) - 1],
Perfs_CI_significant['C-Index-difference_str_all'][len(Perfs_CI_significant.index) - 1],
Perfs_CI_significant_FDR['C-Index-difference_str_all'][len(Perfs_CI_significant_FDR.index) - 1]]
N_MODELS['All_dims'].loc[:, '-'] = \
[n_CI_diff_negatives, n_CI_diff_negatives_significant, n_CI_diff_negatives_significant_FDR]
BEST_MODELS['All_dims'].loc[:, '-'] = [Perfs_CI['version'][0],
Perfs_CI_significant['version'][0],
Perfs_CI_significant_FDR['version'][0]]
BEST_CI_DIFFS['All_dims'].loc[:, '-'] = [Perfs_CI['C-Index-difference_str_all'][0],
Perfs_CI_significant['C-Index-difference_str_all'][0],
Perfs_CI_significant_FDR['C-Index-difference_str_all'][0]]
N_MODELS['All_dims']['All'] = N_MODELS['All_dims']['+'] + N_MODELS['All_dims']['-']
N_MODELS['Main_dims'].loc[:, '+'] = \
[n_CI_diff_main_positives, n_CI_diff_main_positives_significant, n_CI_diff_main_positives_significant_FDR]
BEST_MODELS['Main_dims'].loc[:, '+'] = \
[Perfs_CI_main['version'][len(Perfs_CI_main.index) - 1],
Perfs_CI_main_significant['version'][len(Perfs_CI_main_significant.index) - 1],
Perfs_CI_main_significant_FDR['version'][len(Perfs_CI_main_significant_FDR.index) - 1]]
BEST_CI_DIFFS['Main_dims'].loc[:, '+'] = \
[Perfs_CI_main['C-Index-difference_str_all'][len(Perfs_CI_main.index) - 1],
Perfs_CI_main_significant['C-Index-difference_str_all'][len(Perfs_CI_main_significant.index) - 1],
Perfs_CI_main_significant_FDR['C-Index-difference_str_all'][len(Perfs_CI_main_significant_FDR.index) - 1]]
N_MODELS['Main_dims'].loc[:, '-'] = \
[n_CI_diff_main_negatives, n_CI_diff_main_negatives_significant, n_CI_diff_main_negatives_significant_FDR]
BEST_MODELS['Main_dims'].loc[:, '-'] = [Perfs_CI_main['version'][0],
Perfs_CI_main_significant['version'][0],
Perfs_CI_main_significant_FDR['version'][0]]
BEST_CI_DIFFS['Main_dims'].loc[:, '-'] = [Perfs_CI_main['C-Index-difference_str_all'][0],
Perfs_CI_main_significant['C-Index-difference_str_all'][0],
Perfs_CI_main_significant_FDR['C-Index-difference_str_all'][0]]
N_MODELS['Main_dims']['All'] = N_MODELS['Main_dims']['+'] + N_MODELS['Main_dims']['-']
# Reformat to take into account that sometimes no model fits the criteria
for dims in ['All_dims', 'Main_dims']:
for sign in ['+', '-']:
for models in ['All', 'significant', 'FDR_significant']:
if N_MODELS[dims].loc[models, sign] == 0:
BEST_MODELS[dims].loc[models, sign] = ''
BEST_CI_DIFFS[dims].loc[models, sign] = ''
# Print results
# All dims
print('Number of aging dimensions, best models and associated CI differences for All dims: ')
print(N_MODELS['All_dims'])
print(BEST_MODELS['All_dims'])
print(BEST_CI_DIFFS['All_dims'])
print('Best model between All dims: ')
print(Perfs_CI_significant_FDR[['C-Index-difference_str_all', 'C-Index_all', 'C-Index_CA']].iloc[-1, :])
# Main dims
print('Number of aging dimensions, best models and associated CI differences for Main dims: ')
print(N_MODELS['Main_dims'])
print(BEST_MODELS['Main_dims'])
print(BEST_CI_DIFFS['Main_dims'])
print('Best model between Main dims: ')
print(Perfs_CI_main_significant_FDR[['C-Index-difference_str_all', 'C-Index_all', 'C-Index_CA']].iloc[-1, :])
class SelectBest(Metrics):
"""
For each aging main dimension and selected subdimensions, select the best performing model.
"""
def __init__(self, target=None, pred_type=None):
Metrics.__init__(self)
self.target = target
self.pred_type = pred_type
self.folds = ['test']
self.organs_with_suborgans = {'Brain': ['Cognitive', 'MRI'], 'Eyes': ['All', 'Fundus', 'OCT'],
'Arterial': ['PulseWaveAnalysis', 'Carotids'],
'Heart': ['ECG', 'MRI'], 'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody', 'Scalars'],
'Biochemistry': ['Urine', 'Blood']}
self.organs = []
self.best_models = []
self.PREDICTIONS = {}
self.RESIDUALS = {}
self.PERFORMANCES = {}
self.CORRELATIONS = {}
self.CORRELATIONS_SAMPLESIZES = {}
def _load_data(self):
for fold in self.folds:
path_pred = self.path_data + 'PREDICTIONS_withEnsembles_' + self.pred_type + '_' + self.target + '_' + \
fold + '.csv'
path_res = self.path_data + 'RESIDUALS_' + self.pred_type + '_' + self.target + '_' + fold + '.csv'
path_perf = self.path_data + 'PERFORMANCES_withEnsembles_withCI_ranked_' + self.pred_type + '_' + \
self.target + '_' + fold + '.csv'
path_corr = self.path_data + 'ResidualsCorrelations_str_' + self.pred_type + '_' + self.target + '_' + \
fold + '.csv'
self.PREDICTIONS[fold] = pd.read_csv(path_pred)
self.RESIDUALS[fold] = pd.read_csv(path_res)
self.PERFORMANCES[fold] = pd.read_csv(path_perf)
self.PERFORMANCES[fold].set_index('version', drop=False, inplace=True)
self.CORRELATIONS_SAMPLESIZES[fold] = pd.read_csv(self.path_data + 'ResidualsCorrelations_samplesizes_' +
self.pred_type + '_' + self.target + '_' + fold + '.csv',
index_col=0)
self.CORRELATIONS[fold] = {}
for mode in self.modes:
self.CORRELATIONS[fold][mode] = pd.read_csv(path_corr.replace('_str', mode), index_col=0)
def _select_versions(self):
# Load the ranked performances on the test set
path_perf = self.path_data + 'PERFORMANCES_withEnsembles_withCI_ranked_' + self.pred_type + '_' + \
self.target + '_test.csv'
Performances = pd.read_csv(path_perf)
Performances.set_index('version', drop=False, inplace=True)
list_organs = Performances['organ'].unique()
list_organs.sort()
for organ in list_organs:
print('Selecting best model for ' + organ)
Perf_organ = Performances[Performances['organ'] == organ]
self.organs.append(organ)
self.best_models.append(Perf_organ['version'].values[0])
if organ in self.organs_with_suborgans.keys():
for view in self.organs_with_suborgans[organ]:
print('Selecting best model for ' + organ + view)
Perf_organview = Performances[(Performances['organ'] == organ) & (Performances['view'] == view)]
self.organs.append(organ + view)
self.best_models.append(Perf_organview['version'].values[0])
def _take_subsets(self):
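# Subset the predictions, residuals, performances, correlations and correlation sample sizes to the best
# model per organ, and rename the columns with the organ names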
base_cols = self.id_vars + self.demographic_vars
best_models_pred = ['pred_' + model for model in self.best_models]
best_models_res = ['res_' + model for model in self.best_models]
best_models_corr = ['_'.join(model.split('_')[1:]) for model in self.best_models]
for fold in self.folds:
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].loc[:, base_cols + best_models_pred]
self.PREDICTIONS[fold].columns = base_cols + self.organs
self.RESIDUALS[fold] = self.RESIDUALS[fold].loc[:, base_cols + best_models_res]
self.RESIDUALS[fold].columns = base_cols + self.organs
self.PERFORMANCES[fold] = self.PERFORMANCES[fold].loc[self.best_models, :]
self.PERFORMANCES[fold].index = self.organs
self.CORRELATIONS_SAMPLESIZES[fold] = \
self.CORRELATIONS_SAMPLESIZES[fold].loc[best_models_corr, best_models_corr]
self.CORRELATIONS_SAMPLESIZES[fold].index = self.organs
self.CORRELATIONS_SAMPLESIZES[fold].columns = self.organs
for mode in self.modes:
self.CORRELATIONS[fold][mode] = self.CORRELATIONS[fold][mode].loc[best_models_corr, best_models_corr]
self.CORRELATIONS[fold][mode].index = self.organs
self.CORRELATIONS[fold][mode].columns = self.organs
def select_models(self):
self._load_data()
self._select_versions()
self._take_subsets()
def save_data(self):
for fold in self.folds:
path_pred = self.path_data + 'PREDICTIONS_bestmodels_' + self.pred_type + '_' + self.target + '_' + fold \
+ '.csv'
path_res = self.path_data + 'RESIDUALS_bestmodels_' + self.pred_type + '_' + self.target + '_' + fold + \
'.csv'
path_corr = self.path_data + 'ResidualsCorrelations_bestmodels_str_' + self.pred_type + '_' + self.target \
+ '_' + fold + '.csv'
path_perf = self.path_data + 'PERFORMANCES_bestmodels_ranked_' + self.pred_type + '_' + self.target + '_' \
+ fold + '.csv'
self.PREDICTIONS[fold].to_csv(path_pred, index=False)
self.RESIDUALS[fold].to_csv(path_res, index=False)
self.PERFORMANCES[fold].sort_values(by=self.dict_main_metrics_names[self.target] + '_all', ascending=False,
inplace=True)
self.PERFORMANCES[fold].to_csv(path_perf, index=False)
Performances_alphabetical = self.PERFORMANCES[fold].sort_values(by='version')
Performances_alphabetical.to_csv(path_perf.replace('ranked', 'alphabetical'), index=False)
for mode in self.modes:
self.CORRELATIONS[fold][mode].to_csv(path_corr.replace('_str', mode), index=True)
# Handy draft to print some key results
Perfs = pd.read_csv('../data/PERFORMANCES_withEnsembles_alphabetical_instances_Age_test.csv')
Perfs.set_index('version', drop=False, inplace=True)
# Take the subset corresponding to the 11 main dimensions
main_dims = ['Brain', 'Eyes', 'Hearing', 'Lungs', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity',
'Biochemistry', 'ImmuneSystem']
main_rows = ['Age_' + dim + '_*' * 10 for dim in main_dims]
Perfs_main = Perfs.loc[main_rows, :]
print('R-Squared for all dimensions = ' + str(round(Perfs['R-Squared_all'].mean(), 3)) + '; std = ' +
str(round(Perfs['R-Squared_all'].std(), 3)) + '; min = ' + str(round(Perfs['R-Squared_all'].min(), 3)) +
'; max = ' + str(round(Perfs['R-Squared_all'].max(), 3)))
print('RMSEs for all dimensions = ' + str(round(Perfs['RMSE_all'].mean(), 3)) + '; std = ' +
str(round(Perfs['RMSE_all'].std(), 3)) + '; min = ' + str(
round(Perfs['RMSE_all'].min(), 3)) + '; max = ' +
str(round(Perfs['RMSE_all'].max(), 3)))
print('R-Squared for main dimensions = ' + str(round(Perfs_main['R-Squared_all'].mean(), 3)) + '; std = ' +
str(round(Perfs_main['R-Squared_all'].std(), 3)) + '; min = ' + str(
round(Perfs_main['R-Squared_all'].min(), 3)) +
'; max = ' + str(round(Perfs_main['R-Squared_all'].max(), 3)))
print('RMSEs for main dimensions = ' + str(round(Perfs_main['RMSE_all'].mean(), 3)) + '; std = ' +
str(round(Perfs_main['RMSE_all'].std(), 3)) + '; min = ' + str(round(Perfs_main['RMSE_all'].min(), 3)) +
'; max = ' + str(round(Perfs_main['RMSE_all'].max(), 3)))
class SelectCorrelationsNAs(Basics):
"""
Build a summary correlation matrix: when a correlation cannot be computed in terms of samples ("instances") because
the intersection has a small sample size, fill the NA with the correlation computed at the participant's level
("eids").
"""
def __init__(self, target=None):
Basics.__init__(self)
self.target = target
self.folds = ['test']
self.CORRELATIONS = {'*': {'': {}, '_sd': {}, '_str': {}}}
def load_data(self):
for models_type in self.models_types:
self.CORRELATIONS[models_type] = {}
for pred_type in ['instances', 'eids', '*']:
self.CORRELATIONS[models_type][pred_type] = {}
for mode in self.modes:
self.CORRELATIONS[models_type][pred_type][mode] = {}
for fold in self.folds:
if pred_type == '*':
self.CORRELATIONS[models_type][pred_type][mode][fold] = \
pd.read_csv(self.path_data + 'ResidualsCorrelations' + models_type + mode +
'_instances_' + self.target + '_' + fold + '.csv', index_col=0)
else:
self.CORRELATIONS[models_type][pred_type][mode][fold] = \
pd.read_csv(self.path_data + 'ResidualsCorrelations' + models_type + mode + '_' +
pred_type + '_' + self.target + '_' + fold + '.csv', index_col=0)
def fill_na(self):
# Detect NAs in the instances correlation matrix; they will be filled with the eids-level correlations below
for models_type in self.models_types:
NAs_mask = self.CORRELATIONS[models_type]['instances']['']['test'].isna()
for mode in self.modes:
for fold in self.folds:
self.CORRELATIONS[models_type]['*'][mode][fold] = \
self.CORRELATIONS[models_type]['instances'][mode][fold].copy()
self.CORRELATIONS[models_type]['*'][mode][fold][NAs_mask] = \
self.CORRELATIONS[models_type]['eids'][mode][fold][NAs_mask]
def save_correlations(self):
for models_type in self.models_types:
for mode in self.modes:
for fold in self.folds:
self.CORRELATIONS[models_type]['*'][mode][fold].to_csv(self.path_data + 'ResidualsCorrelations' +
models_type + mode + '_*_' + self.target +
'_' + fold + '.csv', index=True)
class CorrelationsAverages:
"""
Computes average correlation at different levels, to summarize the results.
"""
def __init__(self):
self.Performances = pd.read_csv("../data/PERFORMANCES_withEnsembles_ranked_eids_Age_test.csv")
self.Correlations = pd.read_csv("../data/ResidualsCorrelations_eids_Age_test.csv", index_col=0)
def _melt_correlation_matrix(self, models):
models = ['_'.join(c.split('_')[1:]) for c in models]
Corrs = self.Correlations.loc[models, models]
# Keep only the strictly upper triangle so that each pair of models is counted once
Corrs = Corrs.where(np.triu(np.ones(Corrs.shape), 1).astype(bool))
Corrs = Corrs.stack().reset_index()
Corrs.columns = ['Row', 'Column', 'Correlation']
return Corrs
@staticmethod
def _split_version(row):
names = ['organ', 'view', 'transformation', 'architecture', 'n_fc_layers', 'n_fc_nodes', 'optimizer',
'learning_rate', 'weight_decay', 'dropout_rate', 'data_augmentation_factor']
row_names = ['row_' + name for name in names]
col_names = ['col_' + name for name in names]
row_params = row['Row'].split('_')
col_params = row['Column'].split('_')
new_row = pd.Series(row_params + col_params + [row['Correlation']])
new_row.index = row_names + col_names + ['Correlation']
return new_row
@staticmethod
def _compute_stats(data, title):
m = data['Correlation'].mean()
s = data['Correlation'].std()
n = len(data.index)
print('Correlation between ' + title + ': ' + str(round(m, 3)) + '+-' + str(round(s, 3)) + ', n_pairs=' +
str(n))
@staticmethod
def _generate_pairs(ls):
pairs = []
for i in range(len(ls)):
for j in range((i + 1), len(ls)):
pairs.append((ls[i], ls[j]))
return pairs
@staticmethod
def _extract_pair(Corrs, pair, level):
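# A pair can appear in either orientation (row/column), so both orderings are matched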
extracted = Corrs[((Corrs['row_' + level] == pair[0]) & (Corrs['col_' + level] == pair[1])) |
((Corrs['row_' + level] == pair[1]) & (Corrs['col_' + level] == pair[0]))]
return extracted
def _extract_pairs(self, Corrs, pairs, level):
extracted = None
for pair in pairs:
extracted_pair = self._extract_pair(Corrs, pair, level)
if extracted is None:
extracted = extracted_pair
else:
extracted = pd.concat([extracted, extracted_pair])
return extracted
def correlations_all(self):
Corrs = self._melt_correlation_matrix(self.Performances['version'].values)
self._compute_stats(Corrs, 'All models')
def correlations_dimensions(self):
Perf = self.Performances[(self.Performances['view'] == '*') &
~(self.Performances['organ'].isin(['*', '*instances01', '*instances1.5x',
'*instances23']))]
Corrs = self._melt_correlation_matrix(Perf['version'].values)
self._compute_stats(Corrs, 'Main Dimensions')
def correlations_subdimensions(self):
# Subdimensions
dict_dims_to_subdims = {
'Brain': ['Cognitive', 'MRI'],
'Eyes': ['OCT', 'Fundus', 'IntraocularPressure', 'Acuity', 'Autorefraction'],
'Arterial': ['PulseWaveAnalysis', 'Carotids'],
'Heart': ['ECG', 'MRI'],
'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody', 'Scalars'],
'PhysicalActivity': ['FullWeek', 'Walking'],
'Biochemistry': ['Urine', 'Blood']
}
Corrs_subdim = None
for dim in dict_dims_to_subdims.keys():
models = ['Age_' + dim + '_' + subdim + '_*' * 9 for subdim in dict_dims_to_subdims[dim]]
Corrs_dim = self._melt_correlation_matrix(models)
self._compute_stats(Corrs_dim, dim + ' subdimensions')
if Corrs_subdim is None:
Corrs_subdim = Corrs_dim
else:
Corrs_subdim = pd.concat([Corrs_subdim, Corrs_dim])
# Compute the average over the subdimensions
self._compute_stats(Corrs_subdim, 'Subdimensions')
def correlations_subsubdimensions(self):
# Only select the ensemble models at the architecture level,
Perf_ss = self.Performances[self.Performances['architecture'] == '*']
# Brain - Cognitive
Perf = Perf_ss[(Perf_ss['organ'] == 'Brain') & (Perf_ss['view'] == 'Cognitive')]
# Remove ensemble model and all scalars
Perf = Perf[~Perf['transformation'].isin(['*', 'AllScalars'])]
Corrs_bc = self._melt_correlation_matrix(Perf['version'].values)
self._compute_stats(Corrs_bc, 'Brain cognitive sub-subdimensions')
# Musculoskeletal - Scalars
Perf = Perf_ss[(Perf_ss['organ'] == 'Musculoskeletal') & (Perf_ss['view'] == 'Scalars')]
Perf = Perf[~Perf['transformation'].isin(['*', 'AllScalars'])]
Corrs_ms = self._melt_correlation_matrix(Perf['version'].values)
self._compute_stats(Corrs_ms, 'Musculoskeletal - Scalars sub-subdimensions')
# Average over subsubdimensions
Corrs_subsubdimensions = pd.concat([Corrs_bc, Corrs_ms])
self._compute_stats(Corrs_subsubdimensions, 'Sub-subdimensions')
def correlations_views(self):
# Variables
dict_dim_to_view = {
'Brain_MRI': [['SagittalReference', 'CoronalReference', 'TransverseReference', 'dMRIWeightedMeans',
'SubcorticalVolumes', 'GreyMatterVolumes'],
['SagittalRaw', 'CoronalRaw', 'TransverseRaw']],
'Arterial_PulseWaveAnalysis': [['Scalars', 'TimeSeries']],
'Arterial_Carotids': [['Scalars', 'LongAxis', 'CIMT120', 'CIMT150', 'ShortAxis']],
'Heart_ECG': [['Scalars', 'TimeSeries']],
'Heart_MRI': [['2chambersRaw', '3chambersRaw', '4chambersRaw'],
['2chambersContrast', '3chambersContrast', '4chambersContrast']],
'Musculoskeletal_Spine': [['Sagittal', 'Coronal']],
'Musculoskeletal_FullBody': [['Figure', 'Flesh']],
'PhysicalActivity_FullWeek': [
['Scalars', 'Acceleration', 'TimeSeriesFeatures', 'GramianAngularField1minDifference',
'GramianAngularField1minSummation', 'MarkovTransitionField1min',
'RecurrencePlots1min']]
}
Corrs_views = None
for dim in dict_dim_to_view.keys():
Corrs_dims = None
for i, views in enumerate(dict_dim_to_view[dim]):
models = ['Age_' + dim + '_' + view + '_*' * 8 for view in dict_dim_to_view[dim][i]]
Corrs_dim = self._melt_correlation_matrix(models)
if Corrs_dims is None:
Corrs_dims = Corrs_dim
else:
Corrs_dims = pd.concat([Corrs_dims, Corrs_dim])
self._compute_stats(Corrs_dims, dim + ' views')
if Corrs_views is None:
Corrs_views = Corrs_dims
else:
Corrs_views = pd.concat([Corrs_views, Corrs_dims])
# Compute the average over the views
self._compute_stats(Corrs_views, 'Views')
def correlations_transformations(self):
# Raw vs. Contrast (Heart MRI, Abdomen Liver, Abdomen Pancreas), Raw vs. Reference (Brain MRI),
# Figure vs. Skeleton (Musculoskeletal FullBody)
# Filter out the models that are ensembles at the architecture level
models_to_keep = [model for model in self.Correlations.index.values if model.split('_')[3] != '*']
# Select only the models that are Heart MRI, Abdomen, or Brain MRI
models_to_keep = [model for model in models_to_keep if
((model.split('_')[0] == 'Abdomen') & (model.split('_')[1] in ['Liver', 'Pancreas'])) |
((model.split('_')[0] == 'Brain') & (model.split('_')[1] == 'MRI')) |
((model.split('_')[0] == 'Heart') & (model.split('_')[1] == 'MRI')) |
((model.split('_')[0] == 'Musculoskeletal') & (model.split('_')[1] == 'FullBody') &
(model.split('_')[2] in ['Figure', 'Skeleton']))]
# Select only the models that have the relevant preprocessing/transformations
models_to_keep = [model for model in models_to_keep if model.split('_')[2] in
['Raw', 'Contrast', '2chambersRaw', '2chambersContrast', '3chambersRaw', '3chambersContrast',
'4chambersRaw', '4chambersContrast', 'SagittalRaw', 'SagittalReference', 'CoronalRaw',
'CoronalReference', 'TransverseRaw', 'TransverseReference', 'Figure', 'Skeleton']]
# Select the corresponding rows and columns
Corrs = self.Correlations.loc[models_to_keep, models_to_keep]
# Melt correlation matrix to dataframe
Corrs = Corrs.where(np.triu(np.ones(Corrs.shape), 1).astype(bool))
Corrs = Corrs.stack().reset_index()
Corrs.columns = ['Row', 'Column', 'Correlation']
Corrs = Corrs.apply(self._split_version, axis=1)
# Only keep the models that have the same organ, view and architecture
Corrs = Corrs[(Corrs['row_organ'] == Corrs['col_organ']) & (Corrs['row_view'] == Corrs['col_view']) &
(Corrs['row_architecture'] == Corrs['col_architecture'])]
# Define preprocessing pairs
dict_preprocessing = {
'Raw-Reference': [('SagittalRaw', 'SagittalReference'), ('CoronalRaw', 'CoronalReference'),
('TransverseRaw', 'TransverseReference')],
'Raw-Contrast': [('Raw', 'Contrast'), ('2chambersRaw', '2chambersContrast'),
('3chambersRaw', '3chambersContrast'), ('4chambersRaw', '4chambersContrast')],
'Figure-Skeleton': [('Figure', 'Skeleton')]
}
# Compute average correlation between each pair of transformations
Corrs_transformations = None
for comparison in dict_preprocessing.keys():
Corrs_comp = self._extract_pairs(Corrs, dict_preprocessing[comparison], 'transformation')
print(comparison)
print(Corrs_comp)
self._compute_stats(Corrs_comp, comparison)
if Corrs_transformations is None:
Corrs_transformations = Corrs_comp
else:
Corrs_transformations = pd.concat([Corrs_transformations, Corrs_comp])
# Compute average correlation between transformations
self._compute_stats(Corrs_transformations, 'Transformations')
def correlations_algorithms(self):
# Variables
algorithms_scalars = ['ElasticNet', 'LightGBM', 'NeuralNetwork']
algorithms_images = ['InceptionV3', 'InceptionResNetV2']
# Filter out the ensemble models (at the level of the algorithm)
models_to_keep = [model for model in self.Correlations.index.values if model.split('_')[3] != '*']
Corrs = self.Correlations.loc[models_to_keep, models_to_keep]
# Melt correlation matrix to dataframe
Corrs = Corrs.where(np.triu(np.ones(Corrs.shape), 1).astype(bool))
Corrs = Corrs.stack().reset_index()
Corrs.columns = ['Row', 'Column', 'Correlation']
Corrs = Corrs.apply(self._split_version, axis=1)
# Keep only the pairs of models that share the same organ, view and transformation (i.e. that differ only by the algorithm)
for name in ['organ', 'view', 'transformation']:
Corrs = Corrs[Corrs['row_' + name] == Corrs['col_' + name]]
# Compute average correlation between algorithms
self._compute_stats(Corrs, 'Algorithms')
algorithms_pairs = self._generate_pairs(algorithms_scalars) + self._generate_pairs(algorithms_images)
# Compute average correlation between each algorithm pair
for pair in algorithms_pairs:
Corrs_pair = self._extract_pair(Corrs, pair, 'architecture')
self._compute_stats(Corrs_pair, pair[0] + ' and ' + pair[1])
class AttentionMaps(DeepLearning):
"""
Computes the attention maps (saliency maps and Grad-RAM maps) for all images.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, debug_mode=False):
# Partial initialization with placeholders to get access to parameters and functions
DeepLearning.__init__(self, 'Age', 'Abdomen', 'Liver', 'Raw', 'InceptionResNetV2', '1', '1024', 'Adam',
'0.0001', '0.1', '0.5', '1.0', False)
# Parameters
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.version = None
self.leftright = True if self.organ + '_' + self.view in self.left_right_organs_views else False
self.parameters = None
self.image_width = None
self.image_height = None
self.batch_size = None
self.N_samples_attentionmaps = 10 # needs to be > 1 for the script to work
if debug_mode:
self.N_samples_attentionmaps = 2
self.dir_images = '../images/' + organ + '/' + view + '/' + transformation + '/'
self.prediction_type = self.dict_prediction_types[target]
self.Residuals = None
self.df_to_plot = None
self.df_outer_fold = None
self.class_mode = None
self.image = None
self.generator = None
self.dict_architecture_to_last_conv_layer_name = \
{'VGG16': 'block5_conv3', 'VGG19': 'block5_conv4', 'MobileNet': 'conv_pw_13_relu',
'MobileNetV2': 'out_relu', 'DenseNet121': 'relu', 'DenseNet169': 'relu', 'DenseNet201': 'relu',
'NASNetMobile': 'activation_1136', 'NASNetLarge': 'activation_1396', 'Xception': 'block14_sepconv2_act',
'InceptionV3': 'mixed10', 'InceptionResNetV2': 'conv_7b_ac', 'EfficientNetB7': 'top_activation'}
self.last_conv_layer = None
self.organs_views_transformations_images = \
['Brain_MRI_SagittalRaw', 'Brain_MRI_SagittalReference', 'Brain_MRI_CoronalRaw',
'Brain_MRI_CoronalReference', 'Brain_MRI_TransverseRaw', 'Brain_MRI_TransverseReference',
'Eyes_Fundus_Raw', 'Eyes_OCT_Raw', 'Arterial_Carotids_Mixed', 'Arterial_Carotids_LongAxis',
'Arterial_Carotids_CIMT120', 'Arterial_Carotids_CIMT150', 'Arterial_Carotids_ShortAxis',
'Heart_MRI_2chambersRaw', 'Heart_MRI_2chambersContrast', 'Heart_MRI_3chambersRaw',
'Heart_MRI_3chambersContrast', 'Heart_MRI_4chambersRaw', 'Heart_MRI_4chambersContrast',
'Abdomen_Liver_Raw', 'Abdomen_Liver_Contrast', 'Abdomen_Pancreas_Raw', 'Abdomen_Pancreas_Contrast',
'Musculoskeletal_Spine_Sagittal', 'Musculoskeletal_Spine_Coronal', 'Musculoskeletal_Hips_MRI',
'Musculoskeletal_Knees_MRI', 'Musculoskeletal_FullBody_Mixed', 'Musculoskeletal_FullBody_Figure',
'Musculoskeletal_FullBody_Skeleton', 'Musculoskeletal_FullBody_Flesh',
'PhysicalActivity_FullWeek_GramianAngularField1minDifference',
'PhysicalActivity_FullWeek_GramianAngularField1minSummation',
'PhysicalActivity_FullWeek_MarkovTransitionField1min', 'PhysicalActivity_FullWeek_RecurrencePlots1min']
def _select_best_model(self):
# Pick the best model based on the performances
path_perf = self.path_data + 'PERFORMANCES_withoutEnsembles_ranked_instances_' + self.target + '_test.csv'
Performances = pd.read_csv(path_perf).set_index('version', drop=False)
Performances = Performances[(Performances['organ'] == self.organ)
& (Performances['view'] == self.view)
& (Performances['transformation'] == self.transformation)]
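# The performances file is ranked, so the first remaining row is the best-performing version for this
# organ/view/transformation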
self.version = Performances['version'].values[0]
del Performances
# other parameters
self.parameters = self._version_to_parameters(self.version)
if self.organ + '_' + self.view + '_' + self.transformation in self.organs_views_transformations_images:
DeepLearning.__init__(self, self.parameters['target'], self.parameters['organ'], self.parameters['view'],
self.parameters['transformation'], self.parameters['architecture'],
self.parameters['n_fc_layers'], self.parameters['n_fc_nodes'],
self.parameters['optimizer'], self.parameters['learning_rate'],
self.parameters['weight_decay'], self.parameters['dropout_rate'],
self.parameters['data_augmentation_factor'], False)
def _format_residuals(self):
# Format the residuals
Residuals_full = pd.read_csv(self.path_data + 'RESIDUALS_instances_' + self.target + '_test.csv')
Residuals = Residuals_full[['id', 'outer_fold'] + self.demographic_vars + ['res_' + self.version]]
del Residuals_full
Residuals.dropna(inplace=True)
Residuals.rename(columns={'res_' + self.version: 'res'}, inplace=True)
Residuals.set_index('id', drop=False, inplace=True)
Residuals['outer_fold'] = Residuals['outer_fold'].astype(int).astype(str)
Residuals['res_abs'] = Residuals['res'].abs()
self.Residuals = Residuals
def _select_representative_samples(self):
# Select which samples to plot: for each sex, age category and aging rate, keep the samples with the most
# extreme (or, for 'normal', smallest absolute) residuals
print('Selecting representative samples...')
df_to_plot = None
# Sex
dict_sexes_to_values = {'Male': 1, 'Female': 0}
for sex in ['Male', 'Female']:
print('Sex: ' + sex)
Residuals_sex = self.Residuals[self.Residuals['Sex'] == dict_sexes_to_values[sex]].copy()
Residuals_sex['sex'] = sex
# Age category
for age_category in ['young', 'middle', 'old']:
print('Age category: ' + age_category)
if age_category == 'young':
Residuals_age = Residuals_sex[Residuals_sex['Age'] <= Residuals_sex['Age'].min() + 10]
elif age_category == 'middle':
Residuals_age = Residuals_sex[(Residuals_sex['Age'] - Residuals_sex['Age'].median()).abs() < 5]
else:
Residuals_age = Residuals_sex[Residuals_sex['Age'] >= Residuals_sex['Age'].max() - 10]
Residuals_age['age_category'] = age_category
# Aging rate
for aging_rate in ['accelerated', 'normal', 'decelerated']:
print('Aging rate: ' + aging_rate)
Residuals_ar = Residuals_age.copy()  # copy so that in-place sorting does not alter Residuals_age across iterations
if aging_rate == 'accelerated':
Residuals_ar.sort_values(by='res', ascending=True, inplace=True)
elif aging_rate == 'decelerated':
Residuals_ar.sort_values(by='res', ascending=False, inplace=True)
else:
Residuals_ar.sort_values(by='res_abs', ascending=True, inplace=True)
Residuals_ar['aging_rate'] = aging_rate
Residuals_ar = Residuals_ar.iloc[:self.N_samples_attentionmaps, ]
Residuals_ar['sample'] = range(len(Residuals_ar.index))
if df_to_plot is None:
df_to_plot = Residuals_ar
else:
df_to_plot = pd.concat([df_to_plot, Residuals_ar])
# Postprocessing
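# Biological age = chronological age minus the bias-corrected residual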
df_to_plot['Biological_Age'] = df_to_plot['Age'] - df_to_plot['res']
activations_path = '../figures/Attention_Maps/' + self.target + '/' + self.organ + '/' + self.view + '/' + \
self.transformation + '/' + df_to_plot['sex'] + '/' + df_to_plot['age_category'] + '/' + \
df_to_plot['aging_rate']
file_names = '/imagetypeplaceholder_' + self.target + '_' + self.organ + '_' + self.view + '_' + \
self.transformation + '_' + df_to_plot['sex'] + '_' + df_to_plot['age_category'] + '_' + \
df_to_plot['aging_rate'] + '_' + df_to_plot['sample'].astype(str)
if self.leftright:
activations_path += '/sideplaceholder'
file_names += '_sideplaceholder'
df_to_plot['save_title'] = activations_path + file_names
path_save = self.path_data + 'AttentionMaps-samples_' + self.target + '_' + self.organ + '_' + self.view + \
'_' + self.transformation + '.csv'
df_to_plot.to_csv(path_save, index=False)
self.df_to_plot = df_to_plot
def preprocessing(self):
self._select_best_model()
self._format_residuals()
self._select_representative_samples()
def _preprocess_for_outer_fold(self, outer_fold):
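# Split the selected samples into full batches plus a leftover batch so that every image gets an attention
# map; for left/right organs each sample contributes two images. The matching model weights are then loaded.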
self.df_outer_fold = self.df_to_plot[self.df_to_plot['outer_fold'] == outer_fold]
self.n_images = len(self.df_outer_fold.index)
if self.leftright:
self.n_images *= 2
# Generate the data generator(s)
self.n_images_batch = self.n_images // self.batch_size * self.batch_size
self.n_samples_batch = self.n_images_batch // 2 if self.leftright else self.n_images_batch
self.df_batch = self.df_outer_fold.iloc[:self.n_samples_batch, :]
if self.n_images_batch > 0:
self.generator_batch = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=self.df_batch, n_samples_per_subepoch=None,
batch_size=self.batch_size, training_mode=False,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=False, data_augmentation_factor=None, seed=self.seed)
else:
self.generator_batch = None
self.n_samples_leftovers = self.n_images % self.batch_size
self.df_leftovers = self.df_outer_fold.iloc[self.n_samples_batch:, :]
if self.n_samples_leftovers > 0:
self.generator_leftovers = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=self.df_leftovers, n_samples_per_subepoch=None,
batch_size=self.n_samples_leftovers, training_mode=False,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=False, data_augmentation_factor=None, seed=self.seed)
else:
self.generator_leftovers = None
# Load the weights for the fold (for test images in fold i, load the corresponding model: (i-1) % n_CV_outer_folds)
outer_fold_model = str((int(outer_fold) - 1) % self.n_CV_outer_folds)
self.model.load_weights(self.path_data + 'model-weights_' + self.version + '_' + outer_fold_model + '.h5')
@staticmethod
def _process_saliency(saliency):
saliency *= 255 / np.max(np.abs(saliency))
saliency = saliency.astype(int)
r_ch = saliency.copy()
r_ch[r_ch < 0] = 0
b_ch = -saliency.copy()
b_ch[b_ch < 0] = 0
g_ch = saliency.copy() * 0
a_ch = np.maximum(b_ch, r_ch)
saliency = np.dstack((r_ch, g_ch, b_ch, a_ch))
return saliency
@staticmethod
def _process_gradcam(gradcam):
# rescale to 0-255
gradcam = np.maximum(gradcam, 0) / np.max(gradcam)
gradcam = np.uint8(255 * gradcam)
# Convert to rgb
jet = cm.get_cmap("jet")
jet_colors = jet(np.arange(256))[:, :3]
jet_gradcam = jet_colors[gradcam]
jet_gradcam = array_to_img(jet_gradcam)
jet_gradcam = jet_gradcam.resize((gradcam.shape[1], gradcam.shape[0]))
jet_gradcam = img_to_array(jet_gradcam)
return jet_gradcam
def _generate_maps_for_one_batch(self, df, Xs, y):
# Generate saliency maps: gradients of the prediction with respect to the input image, summed over the colour channels
saliencies = get_gradients_of_activations(self.model, Xs, y, layer_name='input_1')['input_1'].sum(axis=3)
# Generate Grad-RAM maps
weights = get_gradients_of_activations(self.model, Xs, y, layer_name=self.last_conv_layer,
)[self.last_conv_layer]
weights = weights.mean(axis=(1, 2))
weights /= np.abs(weights.max()) + 1e-7 # for numerical stability
activations = get_activations(self.model, Xs, layer_name=self.last_conv_layer)[self.last_conv_layer]
# We must take the absolute value because for Grad-RAM, unlike for Grad-Cam, we care both about + and - effects
gradcams = np.abs(np.einsum('il,ijkl->ijk', weights, activations))
zoom_factor = [1] + list(np.array(Xs[0].shape[1:3]) / np.array(gradcams.shape[1:]))
gradcams = zoom(gradcams, zoom_factor)
# Save the raw image and the attention maps (saliency and Grad-RAM) for each sample
for j in range(len(y)):
# select sample
if self.leftright:
idx = j // 2
side = 'right' if j % 2 == 0 else 'left'
else:
idx = j
side = None
path = df['save_title'].values[idx]
ID = df['id'].values[idx]
# create directory tree if necessary
if self.leftright:
path = path.replace('sideplaceholder', side)
path_dir = '/'.join(path.split('/')[:-1])
if not os.path.exists(path_dir):
os.makedirs(path_dir)
# Save raw image
# Compute path to test if images existed in first place
path_image = '../images/' + self.organ + '/' + self.view + '/' + self.transformation + '/'
if self.leftright:
path_image += side + '/'
path_image += ID + '.jpg'
if not os.path.exists(path_image):
print('No image found at ' + path_image + ', skipping.')
continue
img = load_img(path_image, target_size=(saliencies.shape[1], saliencies.shape[2]))
img.save(path.replace('imagetypeplaceholder', 'RawImage') + '.jpg')
# Save saliency
saliency = saliencies[j, :, :]
saliency = self._process_saliency(saliency)
np.save(path.replace('imagetypeplaceholder', 'Saliency') + '.npy', saliency)
# Save gradcam
gradcam = gradcams[j, :, :]
gradcam = self._process_gradcam(gradcam)
np.save(path.replace('imagetypeplaceholder', 'Gradcam') + '.npy', gradcam)
def generate_filters(self):
if self.organ + '_' + self.view + '_' + self.transformation in self.organs_views_transformations_images:
self._generate_architecture()
self.model.compile(optimizer=self.optimizers[self.optimizer](lr=self.learning_rate, clipnorm=1.0),
loss=self.loss_function, metrics=self.metrics)
self.last_conv_layer = self.dict_architecture_to_last_conv_layer_name[self.parameters['architecture']]
for outer_fold in self.outer_folds:
print('Generate attention maps for outer_fold ' + outer_fold)
gc.collect()
self._preprocess_for_outer_fold(outer_fold)
n_samples_per_batch = self.batch_size // 2 if self.leftright else self.batch_size
for i in range(self.n_images // self.batch_size):
print('Generating maps for batch ' + str(i))
Xs, y = self.generator_batch.__getitem__(i)
df = self.df_batch.iloc[n_samples_per_batch * i: n_samples_per_batch * (i + 1), :]
self._generate_maps_for_one_batch(df, Xs, y)
if self.n_samples_leftovers > 0:
print('Generating maps for leftovers')
Xs, y = self.generator_leftovers.__getitem__(0)
self._generate_maps_for_one_batch(self.df_leftovers, Xs, y)
class GWASPreprocessing(Basics):
"""
Preprocesses the data for the GWASs.
"""
def __init__(self, target=None):
Basics.__init__(self)
self.target = target
self.fam = None
self.Residuals = None
self.covars = None
self.data = None
self.list_organs = None
self.IIDs_organs = {}
self.IIDs_organ_pairs = {}
def _generate_fam_file(self):
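# Build a PLINK-style .fam file (FID, IID, father, mother, Sex, phenotype) with the phenotype set to a
# constant placeholder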
fam = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_genetics/ukb52887_cal_chr1_v2_s488264.fam',
header=None, sep=' ')
fam.columns = ['FID', 'IID', 'father', 'mother', 'Sex', 'phenotype']
fam['phenotype'] = 1
fam.to_csv(self.path_data + 'GWAS.fam', index=False, header=False, sep=' ')
fam.to_csv(self.path_data + 'GWAS_exhaustive_placeholder.tab', index=False, sep='\t')
self.fam = fam
def _preprocess_residuals(self):
# Load residuals
Residuals = pd.read_csv(self.path_data + 'RESIDUALS_bestmodels_eids_' + self.target + '_test.csv')
Residuals['id'] = Residuals['eid']
Residuals.rename(columns={'id': 'FID', 'eid': 'IID'}, inplace=True)
Residuals = Residuals[Residuals['Ethnicity.White'] == 1]
cols_to_drop = ['instance', 'outer_fold', 'Sex'] + \
[col for col in Residuals.columns.values if 'Ethnicity.' in col]
Residuals.drop(columns=cols_to_drop, inplace=True)
self.Residuals = Residuals
self.list_organs = [col for col in self.Residuals.columns.values if col not in ['FID', 'IID', 'Age']]
def _preprocess_covars(self):
# Load covars
covar_cols = ['eid', '22001-0.0', '21000-0.0', '54-0.0', '22000-0.0'] + ['22009-0.' + str(i) for i in
range(1, 41)]
covars = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv', usecols=covar_cols)
dict_rename = {'eid': 'IID', '22001-0.0': 'Sex', '21000-0.0': 'Ethnicity', '54-0.0': 'Assessment_center',
'22000-0.0': 'Genotyping_batch'}
for i in range(1, 41):
dict_rename.update(dict.fromkeys(['22009-0.' + str(i)], 'PC' + str(i)))
covars.rename(columns=dict_rename, inplace=True)
covars.dropna(inplace=True)
covars.loc[covars['Sex'] == 0, 'Sex'] = 2  # recode Sex from 0 to 2 to match the .fam convention (1=male, 2=female)
covars['Sex'] = covars['Sex'].astype(int)
# Remove non-white samples, as suggested in BOLT-LMM_v2.3.4_manual.pdf p18
covars = covars[covars['Ethnicity'].isin([1, 1001, 1002, 1003])]
self.covars = covars
def _merge_main_data(self):
# Merge both dataframes
self.data = self.covars.merge(self.Residuals, on=['IID'])
reordered_cols = ['FID', 'IID', 'Assessment_center', 'Genotyping_batch', 'Age', 'Sex', 'Ethnicity'] + \
['PC' + str(i) for i in range(1, 41)] + self.list_organs
self.data = self.data[reordered_cols]
print('Preparing data for heritabilities')
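# One phenotype/covariate file per organ: keep only that organ's residual, drop rows with missing values,
# and record the IIDs that were kept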
for organ in self.list_organs:
print('Preparing data for ' + organ)
data_organ = self.data.copy()
cols_to_drop = [organ2 for organ2 in self.list_organs if organ2 != organ]
data_organ.drop(columns=cols_to_drop, inplace=True)
data_organ.dropna(inplace=True)
data_organ.to_csv(self.path_data + 'GWAS_data_' + self.target + '_' + organ + '.tab', index=False,
sep='\t')
self.IIDs_organs[organ] = data_organ['IID'].values
def _preprocessing_genetic_correlations(self):
print('Preparing data for genetic correlations')
organs_pairs = pd.DataFrame(columns=['organ1', 'organ2'])
for counter, organ1 in enumerate(self.list_organs):
for organ2 in self.list_organs[(counter + 1):]:
print('Preparing data for the organ pair ' + organ1 + ' and ' + organ2)
# Generate GWAS dataframe
organs_pairs = pd.concat([organs_pairs, pd.DataFrame([{'organ1': organ1, 'organ2': organ2}])],
ignore_index=True)
data_organ_pair = self.data.copy()
cols_to_drop = [organ3 for organ3 in self.list_organs if organ3 not in [organ1, organ2]]
data_organ_pair.drop(columns=cols_to_drop, inplace=True)
data_organ_pair.dropna(inplace=True)
data_organ_pair.to_csv(self.path_data + 'GWAS_data_' + self.target + '_' + organ1 + '_' + organ2 +
'.tab', index=False, sep='\t')
self.IIDs_organ_pairs[organ1 + '_' + organ2] = data_organ_pair['IID'].values
organs_pairs.to_csv(self.path_data + 'GWAS_genetic_correlations_pairs_' + self.target + '.csv', header=False,
index=False)
def _list_removed(self):
# samples to remove for each organ
print('Listing samples to remove for each organ')
for organ in self.list_organs:
print('Preparing samples to remove for organ ' + organ)
remove_organ = self.fam[['FID', 'IID']].copy()
remove_organ = remove_organ[~remove_organ['IID'].isin(self.IIDs_organs[organ])]
remove_organ.to_csv(self.path_data + 'GWAS_remove_' + self.target + '_' + organ + '.tab', index=False,
header=False, sep=' ')
# samples to remove for each organ pair
print('Listing samples to remove for each organ pair')
for counter, organ1 in enumerate(self.list_organs):
for organ2 in self.list_organs[(counter + 1):]:
print('Preparing samples to remove for organ pair ' + organ1 + ' and ' + organ2)
remove_organ_pair = self.fam[['FID', 'IID']].copy()
remove_organ_pair = \
remove_organ_pair[~remove_organ_pair['IID'].isin(self.IIDs_organ_pairs[organ1 + '_' + organ2])]
remove_organ_pair.to_csv(self.path_data + 'GWAS_remove_' + self.target + '_' + organ1 + '_' + organ2 +
'.tab', index=False, header=False, sep=' ')
def compute_gwas_inputs(self):
self._generate_fam_file()
self._preprocess_residuals()
self._preprocess_covars()
self._merge_main_data()
self._preprocessing_genetic_correlations()
self._list_removed()
class GWASPostprocessing(Basics):
"""
Postprocesses the GWAS results and stores the results in summary files.
"""
def __init__(self, target=None):
Basics.__init__(self)
self.target = target
self.organ = None
self.GWAS = None
self.FDR_correction = 5e-8  # genome-wide significance threshold used to select the hits
def _processing(self):
self.GWAS = pd.read_csv(self.path_data + 'GWAS_' + self.target + '_' + self.organ + '_X.stats', sep='\t')
GWAS_autosome = pd.read_csv(self.path_data + 'GWAS_' + self.target + '_' + self.organ + '_autosome.stats',
sep='\t')
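# Combine the X-chromosome and autosomal result files: rows whose CHR is not 23 are overwritten with the
# autosomal statistics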
self.GWAS[self.GWAS['CHR'] != 23] = GWAS_autosome
self.GWAS_hits = self.GWAS[self.GWAS['P_BOLT_LMM_INF'] < self.FDR_correction]
def _save_data(self):
self.GWAS.to_csv(self.path_data + 'GWAS_' + self.target + '_' + self.organ + '.csv', index=False)
self.GWAS_hits.to_csv(self.path_data + 'GWAS_hits_' + self.target + '_' + self.organ + '.csv', index=False)
def _merge_all_hits(self):
print('Merging all the GWAS results into a model called All...')
# Summarize all the significant SNPs
files = [file for file in glob.glob(self.path_data + 'GWAS_hits*')
if ('All' not in file) & ('_withGenes' not in file)]
All_hits = None
print(files)
for file in files:
print(file)
hits_organ = pd.read_csv(file)[
['SNP', 'CHR', 'BP', 'GENPOS', 'ALLELE1', 'ALLELE0', 'A1FREQ', 'F_MISS', 'CHISQ_LINREG',
'P_LINREG', 'BETA', 'SE', 'CHISQ_BOLT_LMM_INF', 'P_BOLT_LMM_INF']]
hits_organ['organ'] = '.'.join(file.split('_')[-1].split('.')[:-1])
if All_hits is None:
All_hits = hits_organ
else:
All_hits = pd.concat([All_hits, hits_organ])
All_hits.sort_values(by=['CHR', 'BP'], inplace=True)
All_hits.to_csv(self.path_data + 'GWAS_hits_' + self.target + '_All.csv', index=False)
def processing_all_organs(self):
if not os.path.exists('../figures/GWAS/'):
os.makedirs('../figures/GWAS/')
for organ in self.organs_XWAS:
if os.path.exists(self.path_data + 'GWAS_' + self.target + '_' + organ + '_X.stats') & \
os.path.exists(self.path_data + 'GWAS_' + self.target + '_' + organ + '_autosome.stats'):
print('Processing data for organ ' + organ)
self.organ = organ
self._processing()
self._save_data()
self._merge_all_hits()
@staticmethod
def _grep(pattern, path):
for line in open(path, 'r'):
if line.find(pattern) > -1:
return True
return False
@staticmethod
def _melt_correlation_matrix(Correlations, models):
Corrs = Correlations.loc[models, models]
Corrs = Corrs.where(np.triu(np.ones(Corrs.shape), 1).astype(bool))
Corrs = Corrs.stack().reset_index()
Corrs.columns = ['Row', 'Column', 'Correlation']
return Corrs
@staticmethod
def _compute_stats(data, title):
m = data['Correlation'].mean()
s = data['Correlation'].std()
n = len(data.index)
print('Correlation between ' + title + ': ' + str(round(m, 3)) + '+-' + str(round(s, 3)) +
', n_pairs=' + str(n))
def parse_heritability_scores(self):
# Generate empty dataframe
Heritabilities = np.empty((len(self.organs_XWAS), 3,))
Heritabilities.fill(np.nan)
Heritabilities = pd.DataFrame(Heritabilities)
Heritabilities.index = self.organs_XWAS
Heritabilities.columns = ['Organ', 'h2', 'h2_sd']
# Fill the dataframe
for organ in self.organs_XWAS:
path = '../eo/MI09C_reml_' + self.target + '_' + organ + '_X.out'
if os.path.exists(path) and self._grep("h2g", path):
for line in open('../eo/MI09C_reml_' + self.target + '_' + organ + '_X.out', 'r'):
if line.find('h2g (1,1): ') > -1:
h2 = float(line.split()[2])
h2_sd = float(line.split()[-1][1:-2])
Heritabilities.loc[organ, :] = [organ, h2, h2_sd]
# Print and save results
print('Heritabilities:')
print(Heritabilities)
Heritabilities.to_csv(self.path_data + 'GWAS_heritabilities_' + self.target + '.csv', index=False)
def parse_genetic_correlations(self):
# Generate empty dataframe
Genetic_correlations = np.empty((len(self.organs_XWAS), len(self.organs_XWAS),))
Genetic_correlations.fill(np.nan)
Genetic_correlations = pd.DataFrame(Genetic_correlations)
Genetic_correlations.index = self.organs_XWAS
Genetic_correlations.columns = self.organs_XWAS
Genetic_correlations_sd = Genetic_correlations.copy()
Genetic_correlations_str = Genetic_correlations.copy()
# Fill the dataframe
for counter, organ1 in enumerate(self.organs_XWAS):
for organ2 in self.organs_XWAS[(counter + 1):]:
if os.path.exists('../eo/MI09D_' + self.target + '_' + organ1 + '_' + organ2 + '.out'):
for line in open('../eo/MI09D_' + self.target + '_' + organ1 + '_' + organ2 + '.out', 'r'):
if line.find('gen corr (1,2):') > -1:
corr = float(line.split()[3])
corr_sd = float(line.split()[-1][1:-2])
corr_str = "{:.3f}".format(corr) + '+-' + "{:.3f}".format(corr_sd)
Genetic_correlations.loc[organ1, organ2] = corr
Genetic_correlations.loc[organ2, organ1] = corr
Genetic_correlations_sd.loc[organ1, organ2] = corr_sd
Genetic_correlations_sd.loc[organ2, organ1] = corr_sd
Genetic_correlations_str.loc[organ1, organ2] = corr_str
Genetic_correlations_str.loc[organ2, organ1] = corr_str
# Print and save the results
print('Genetic correlations:')
print(Genetic_correlations)
Genetic_correlations.to_csv(self.path_data + 'GWAS_correlations_' + self.target + '.csv')
Genetic_correlations_sd.to_csv(self.path_data + 'GWAS_correlations_sd_' + self.target + '.csv')
Genetic_correlations_str.to_csv(self.path_data + 'GWAS_correlations_str_' + self.target + '.csv')
# Save sample size for the GWAS correlations
Correlations_sample_sizes = Genetic_correlations.copy()
Correlations_sample_sizes = Correlations_sample_sizes * np.NaN
dimensions = Correlations_sample_sizes.columns.values
for i1, dim1 in enumerate(dimensions):
for i2, dim2 in enumerate(dimensions[i1:]):
# Find the sample size
path = '../data/GWAS_data_Age_' + dim1 + '_' + dim2 + '.tab'
if os.path.exists(path):
ss = len(pd.read_csv(path, sep='\t').index)
Correlations_sample_sizes.loc[dim1, dim2] = ss
Correlations_sample_sizes.loc[dim2, dim1] = ss
Correlations_sample_sizes.to_csv(self.path_data + 'GWAS_correlations_sample_sizes_' + self.target + '.csv')
# Print correlations between main dimensions
main_dims = ['Abdomen', 'Musculoskeletal', 'Lungs', 'Eyes', 'Heart', 'Arterial', 'Brain', 'Biochemistry',
'Hearing', 'ImmuneSystem', 'PhysicalActivity']
Corrs_main = self._melt_correlation_matrix(Genetic_correlations, main_dims)
Corrs_main_sd = self._melt_correlation_matrix(Genetic_correlations_sd, main_dims)
Corrs_main['Correlation_sd'] = Corrs_main_sd['Correlation']
Corrs_main['Correlation_str'] = Corrs_main['Correlation'].round(3).astype(str) + '+-' + Corrs_main['Correlation_sd'].round(3).astype(str)
# Fill the table with sample sizes
sample_sizes = []
to_remove_ss = []
for i, row in Corrs_main.iterrows():
# Fill the sample size
sample_size = Correlations_sample_sizes.loc[row['Row'], row['Column']]
if sample_size <= 15000:
to_remove_ss.append(i)
sample_sizes.append(sample_size)
Corrs_main['Sample_size'] = sample_sizes
self._compute_stats(Corrs_main, 'all pairs')
self._compute_stats(Corrs_main.drop(index=to_remove_ss), 'after filtering sample sizes <= 15000')
# Print correlations between subdimensions
pairs_all = \
[['BrainMRI', 'BrainCognitive'], ['EyesOCT', 'EyesFundus'], ['HeartECG', 'HeartMRI'],
['AbdomenLiver', 'AbdomenPancreas'], ['BiochemistryBlood', 'BiochemistryUrine'],
['MusculoskeletalScalars', 'MusculoskeletalFullBody'], ['MusculoskeletalScalars', 'MusculoskeletalSpine'],
['MusculoskeletalScalars', 'MusculoskeletalHips'], ['MusculoskeletalScalars', 'MusculoskeletalKnees'],
['MusculoskeletalFullBody', 'MusculoskeletalSpine'], ['MusculoskeletalFullBody', 'MusculoskeletalHips'],
['MusculoskeletalFullBody', 'MusculoskeletalKnees'], ['MusculoskeletalSpine', 'MusculoskeletalHips'],
['MusculoskeletalSpine', 'MusculoskeletalKnees'], ['MusculoskeletalHips', 'MusculoskeletalKnees']]
pairs_musculo = \
[['MusculoskeletalScalars', 'MusculoskeletalFullBody'], ['MusculoskeletalScalars', 'MusculoskeletalSpine'],
['MusculoskeletalScalars', 'MusculoskeletalHips'], ['MusculoskeletalScalars', 'MusculoskeletalKnees']]
pairs_musculo_images = \
[['MusculoskeletalFullBody', 'MusculoskeletalSpine'], ['MusculoskeletalFullBody', 'MusculoskeletalHips'],
['MusculoskeletalFullBody', 'MusculoskeletalKnees'], ['MusculoskeletalSpine', 'MusculoskeletalHips'],
['MusculoskeletalSpine', 'MusculoskeletalKnees'], ['MusculoskeletalHips', 'MusculoskeletalKnees']]
PAIRS = {'all subdimensions': pairs_all, 'musculo scalars vs others': pairs_musculo,
'musculo-no scalars': pairs_musculo_images}
for _, (key, pairs) in enumerate(PAIRS.items()):
print(key)
cors_pairs = []
for pair in pairs:
cor = Genetic_correlations.loc[pair[0], pair[1]]
cor_sd = Genetic_correlations_sd.loc[pair[0], pair[1]]
ss = Correlations_sample_sizes.loc[pair[0], pair[1]]
cors_pairs.append(cor)
print('Correlation between ' + pair[0] + ' and ' + pair[1] + ' = ' + str(round(cor, 3)) + '+-' +
str(round(cor_sd, 3)) + '; sample size = ' + str(ss))
print('Mean correlation for ' + key + ' = ' + str(round(np.mean(cors_pairs), 3)) + '+-' +
str(round(np.std(cors_pairs), 3)) + ', number of pairs = ' + str(len(pairs)))
@staticmethod
def compare_phenotypic_correlation_with_genetic_correlations():
Phenotypic_correlations = pd.read_csv('../data/ResidualsCorrelations_bestmodels_eids_Age_test.csv', index_col=0)
Phenotypic_correlations_sd = pd.read_csv('../data/ResidualsCorrelations_bestmodels_sd_eids_Age_test.csv',
index_col=0)
Genetic_correlations = pd.read_csv('../data/GWAS_correlations_Age.csv', index_col=0)
Genetic_correlations_sd = pd.read_csv('../data/GWAS_correlations_sd_Age.csv', index_col=0)
Correlations_sample_sizes = pd.read_csv('../data/GWAS_correlations_sample_sizes_Age.csv', index_col=0)
Genetic_correlations_filtered = Genetic_correlations.where(Correlations_sample_sizes > 15000)
Phenotypic_correlations_filtered = Phenotypic_correlations.where(~Genetic_correlations_filtered.isna())
dict_dims_layers = {
'all_dims': Phenotypic_correlations_filtered.columns.values,
'main_dims': ['Brain', 'Eyes', 'Hearing', 'Lungs', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity', 'Biochemistry', 'ImmuneSystem'],
'sub_dims': ['BrainCognitive', 'BrainMRI', 'EyesFundus', 'EyesOCT', 'ArterialPulseWaveAnalysis',
'ArterialCarotids',
'HeartECG', 'HeartMRI', 'AbdomenLiver', 'AbdomenPancreas', 'MusculoskeletalSpine',
'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalFullBody',
'MusculoskeletalScalars',
'BiochemistryUrine', 'BiochemistryBlood']
}
def _print_comparisons_between_pheno_and_geno(dimensions):
Pheno_dims = Phenotypic_correlations_filtered.loc[dimensions, dimensions]
Geno_dims = Genetic_correlations_filtered.loc[dimensions, dimensions]
Pheno_dims = Pheno_dims.where(np.triu(np.ones(Pheno_dims.shape), 1).astype(bool))
Pheno_dims = Pheno_dims.stack().reset_index()
Geno_dims = Geno_dims.where(np.triu(np.ones(Geno_dims.shape), 1).astype(bool))
Geno_dims = Geno_dims.stack().reset_index()
Pheno_dims.columns = ['Row', 'Column', 'Correlation']
Geno_dims.columns = ['Row', 'Column', 'Correlation']
Correlations_dims = Pheno_dims.copy()
Correlations_dims = Correlations_dims.rename(columns={'Correlation': 'Phenotypic'})
Correlations_dims['Genetic'] = Geno_dims['Correlation']
final_cor = str(round(Correlations_dims[['Phenotypic', 'Genetic']].corr().iloc[0, 1], 3))
# Find min and max difference:
Correlations_dims['Difference'] = Correlations_dims['Phenotypic'] - Correlations_dims['Genetic']
# min
min_diff = Correlations_dims.iloc[Correlations_dims['Difference'].idxmin(), :]
min_pheno_sd = Phenotypic_correlations_sd.loc[min_diff['Row'], min_diff['Column']]
min_genetic_sd = Genetic_correlations_sd.loc[min_diff['Row'], min_diff['Column']]
min_diff_sd = np.sqrt(min_pheno_sd ** 2 + min_genetic_sd ** 2)
# max
max_diff = Correlations_dims.iloc[Correlations_dims['Difference'].idxmax(), :]
max_pheno_sd = Phenotypic_correlations_sd.loc[max_diff['Row'], max_diff['Column']]
max_genetic_sd = Genetic_correlations_sd.loc[max_diff['Row'], max_diff['Column']]
max_diff_sd = np.sqrt(max_pheno_sd ** 2 + max_genetic_sd ** 2)
# print key results
print('The correlation between phenotypic and genetic correlations is: ' + final_cor)
print('The min difference between phenotypic and genetic correlations is between ' + min_diff[
'Row'] + ' and ' +
min_diff['Column'] + '. Difference = ' + str(round(min_diff['Difference'], 3)) + '+-' +
str(round(min_diff_sd, 3)) + '; Phenotypic correlation = ' + str(
round(min_diff['Phenotypic'], 3)) + '+-' +
str(round(min_pheno_sd, 3)) + '; Genetic correlation = ' + str(round(min_diff['Genetic'], 3)) + '+-' +
str(round(min_genetic_sd, 3)))
print('The max difference between phenotypic and genetic correlations is between ' + max_diff[
'Row'] + ' and ' +
max_diff['Column'] + '. Difference = ' + str(round(max_diff['Difference'], 3)) + '+-' +
str(round(max_diff_sd, 3)) + '; Phenotypic correlation = ' + str(
round(max_diff['Phenotypic'], 3)) + '+-' +
str(round(max_pheno_sd, 3)) + '; Genetic correlation = ' + str(round(max_diff['Genetic'], 3)) + '+-' +
str(round(max_genetic_sd, 3)))
for _, (dims_name, dims) in enumerate(dict_dims_layers.items()):
print('Printing the comparison between the phenotypic and genetic correlations at the following level: ' +
dims_name)
_print_comparisons_between_pheno_and_geno(dims)
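# A minimal driver sketch for the postprocessing class above (hypothetical call order;
# target='Age' is taken from the hard-coded paths used elsewhere in this file). It assumes
# the BOLT-LMM '.stats' files and the '../eo/MI09*' log files already exist on disk.
# postprocessor = GWASPostprocessing(target='Age')
# postprocessor.processing_all_organs()        # merge X + autosome stats and save the hits per organ
# postprocessor.parse_heritability_scores()    # parse h2 estimates from the REML logs
# postprocessor.parse_genetic_correlations()   # parse and summarize pairwise genetic correlations
# postprocessor.compare_phenotypic_correlation_with_genetic_correlations()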
class GWASAnnotate(Basics):
"""
/!\ This class corresponds to a step in the pipeline that should be performed on a local machine, since it
must be complemented with manual research on the internet at several steps. /!\
Annotates the hits from the GWAS: names of the genes and gene types.
"""
def __init__(self, target=None):
Basics.__init__(self)
self.target = target
self.All_hits = None
self.All_hits_missing = None
def download_data(self):
os.chdir('/Users/Alan/Desktop/Aging/Medical_Images/bash_local/')
os.system('scp <EMAIL>:/n/groups/patel/Alan/Aging/Medical_Images/data/' +
self.path_data + 'GWAS_hits_' + self.target + '_All.csv' + ' ../data/')
self.All_hits = pd.read_csv(self.path_data + 'GWAS_hits_' + self.target + '_All.csv')
@staticmethod
def _find_nearest_gene(row, key):
if row['Overlapped Gene'] != 'None':
gene = row['Overlapped Gene']
gene_type = row['Type']
elif row['Distance to Nearest Downstream Gene'] <= row['Distance to Nearest Upstream Gene']:
gene = row['Nearest Downstream Gene']
gene_type = row['Type of Nearest Downstream Gene']
else:
gene = row['Nearest Upstream Gene']
gene_type = row['Type of Nearest Upstream Gene']
to_return = pd.Series([row[key], gene, gene_type])
to_return.index = [key, 'Gene', 'Gene_type']
return to_return
@staticmethod
def _concatenate_genes(group, key):
row = group.drop_duplicates(subset=[key])
unique_genes_rows = group.drop_duplicates(subset=[key, 'Gene'])
row['Gene'] = ', '.join(list(unique_genes_rows['Gene']))
row['Gene_type'] = ', '.join(list(unique_genes_rows['Gene_type']))
return row
def preprocessing_rs(self):
# Generate the list of SNPs to annotate in two formats to input into https://www.snp-nexus.org/v4/
# Format 1: based on rs#
snps_rs = pd.Series(self.All_hits['SNP'].unique())
snps_rs.index = ['dbsnp'] * len(snps_rs.index)
snps_rs.to_csv(self.path_data + 'snps_rs.txt', sep='\t', header=False)
def postprocessing_rs(self):
# Load the output from snp-nexus and fill the available rows
genes_rs = pd.read_csv(self.path_data + 'GWAS_genes_rs.txt', sep='\t')
# Find the nearest gene
genes_rs = genes_rs.apply(self._find_nearest_gene, args=(['Variation ID']), axis=1)
# Concatenate the findings when several genes matched
genes_rs = genes_rs.groupby(by='Variation ID').apply(self._concatenate_genes, 'Variation ID')
# Fill the rows from the main dataframe when possible
genes_rs.set_index('Variation ID', inplace=True)
self.All_hits['Gene'] = np.NaN
self.All_hits['Gene_type'] = np.NaN
self.All_hits.set_index('SNP', drop=False, inplace=True)
self.All_hits.loc[genes_rs.index, ['Gene', 'Gene_type']] = genes_rs
def preprocessing_chrbp(self):
# Format 2: based on CHR and BP
snps_chrbp = self.All_hits.loc[self.All_hits['Gene'].isna(), ['CHR', 'BP', 'ALLELE0', 'ALLELE1']]
snps_chrbp['strand'] = 1
snps_chrbp.index = ['chromosome'] * len(snps_chrbp.index)
snps_chrbp.drop_duplicates(inplace=True)
snps_chrbp.to_csv(self.path_data + 'snps_chrbp.txt', sep='\t', header=False)
def postprocessing_chrbp(self):
# Load the output from snp-nexus and fill the available rows
genes_chrbp = pd.read_csv(self.path_data + 'GWAS_genes_chrbp.txt', sep='\t')
genes_chrbp['chrbp'] = genes_chrbp['Chromosome'].astype(str) + ':' + genes_chrbp['Position'].astype(str)
# Find the nearest gene
genes_chrbp = genes_chrbp.apply(self._find_nearest_gene, args=(['chrbp']), axis=1)
# Concatenate the findings when several genes matched
genes_chrbp = genes_chrbp.groupby(by='chrbp').apply(self._concatenate_genes, 'chrbp')
# Fill the rows from the main dataframe when possible
genes_chrbp.set_index('chrbp', inplace=True)
self.All_hits['chrbp'] = 'chr' + self.All_hits['CHR'].astype(str) + ':' + self.All_hits['BP'].astype(str)
self.All_hits.set_index('chrbp', drop=False, inplace=True)
# Only keep the subset of genes that actually are hits (somehow extra SNPs are returned too)
genes_chrbp = genes_chrbp[genes_chrbp.index.isin(self.All_hits.index.values)]
self.All_hits.loc[genes_chrbp.index, ['Gene', 'Gene_type']] = genes_chrbp
def preprocessing_missing(self):
# Identify which SNPs were not matched so far, and use LocusZoom to fill the gaps
self.All_hits_missing = self.All_hits[self.All_hits['Gene'].isna()]
print(str(len(self.All_hits_missing.drop_duplicates(subset=['SNP']).index)) + ' missing SNPs out of ' +
str(len(self.All_hits.drop_duplicates(subset=['SNP']).index)) + '.')
self.All_hits_missing.to_csv(self.path_data + 'All_hits_missing.csv', index=False, sep='\t')
def postprocessing_missing(self):
# The gene_type column was filled using https://www.genecards.org/
self.All_hits.loc['chr1:3691997', ['Gene', 'Gene_type']] = ['SMIM1', 'protein_coding']
self.All_hits.loc['chr2:24194313', ['Gene', 'Gene_type']] = ['COL4A4', 'protein_coding']
self.All_hits.loc['chr2:227896885', ['Gene', 'Gene_type']] = ['UBXN2A', 'protein_coding']
self.All_hits.loc['chr2:27656822', ['Gene', 'Gene_type']] = ['NRBP1', 'protein_coding']
self.All_hits.loc['chr2:42396721', ['Gene', 'Gene_type']] = ['AC083949.1, EML4', 'rna_gene, protein_coding']
self.All_hits.loc['chr2:71661855', ['Gene', 'Gene_type']] = ['ZNF638', 'protein_coding']
self.All_hits.loc['chr3:141081497', ['Gene', 'Gene_type']] = ['PXYLP1, AC117383.1, ZBTB38',
'protein_coding, rna_gene, protein_coding']
self.All_hits.loc['chr4:106317506', ['Gene', 'Gene_type']] = ['PPA2', 'protein_coding']
self.All_hits.loc['chr5:156966773', ['Gene', 'Gene_type']] = ['ADAM19', 'protein_coding']
self.All_hits.loc['chr6:29797695', ['Gene', 'Gene_type']] = ['HLA-G', 'protein_coding']
self.All_hits.loc['chr6:31106501', ['Gene', 'Gene_type']] = ['PSORS1C1, PSORS1C2',
'protein_coding, protein_coding']
self.All_hits.loc['chr6:31322216', ['Gene', 'Gene_type']] = ['HLA-B', 'protein_coding']
self.All_hits.loc['chr6:32552146', ['Gene', 'Gene_type']] = ['HLA-DRB1', 'protein_coding']
self.All_hits.loc['chr6:33377481', ['Gene', 'Gene_type']] = ['KIFC1', 'protein_coding']
self.All_hits.loc['chr8:9683437', ['Gene', 'Gene_type']] = ['snoU13', 'small_nucleolar_rna_gene']
self.All_hits.loc['chr8:19822809', ['Gene', 'Gene_type']] = ['LPL', 'protein_coding']
self.All_hits.loc['chr8:75679126', ['Gene', 'Gene_type']] = ['MIR2052HG', 'rna_gene']
self.All_hits.loc['chr10:18138488', ['Gene', 'Gene_type']] = ['MRC1', 'protein_coding']
self.All_hits.loc['chr10:96084372', ['Gene', 'Gene_type']] = ['PLCE1', 'protein_coding']
self.All_hits.loc['chr11:293001', ['Gene', 'Gene_type']] = ['PGGHG', 'protein_coding']
self.All_hits.loc['chr15:74282833', ['Gene', 'Gene_type']] = ['STOML1', 'protein_coding']
self.All_hits.loc['chr15:89859932', ['Gene', 'Gene_type']] = ['FANCI, POLG', 'protein_coding, protein_coding']
self.All_hits.loc['chr17:44341869', ['Gene', 'Gene_type']] = ['AC005829.1', 'pseudo_gene']
self.All_hits.loc['chr17:79911164', ['Gene', 'Gene_type']] = ['NOTUM', 'protein_coding']
self.All_hits.loc['chr20:57829821', ['Gene', 'Gene_type']] = ['ZNF831', 'protein_coding']
self.All_hits.loc['chr22:29130347', ['Gene', 'Gene_type']] = ['CHEK2', 'protein_coding']
# The following genes were not found by locuszoom, so I used https://www.rcsb.org/pdb/chromosome.do :
self.All_hits.loc['chr6:31084935', ['Gene', 'Gene_type']] = ['CDSN', 'protein_coding']
self.All_hits.loc['chr6:31105857', ['Gene', 'Gene_type']] = ['PSORS1C2', 'protein_coding']
# The following gene was named "0" in locuszoom, so I used https://www.rcsb.org/pdb/chromosome.do :
self.All_hits.loc['chr23:13771531', ['Gene', 'Gene_type']] = ['OFD1', 'protein_coding']
# The following gene did not have a match
self.All_hits.loc['chr23:56640134', ['Gene', 'Gene_type']] = ['UNKNOWN', 'UNKNOWN']
# Ensuring that all SNPs have been annotated
print('Ensuring that all SNPs have been annotated:')
assert self.All_hits['Gene'].isna().sum() == 0
print('Passed.')
# Count the number of unique genes involved and generate the list
unique_genes = list(set((', '.join(self.All_hits['Gene'].unique())).split(', ')))
print('A total of ' + str(len(unique_genes)) + ' unique genes are associated with accelerated aging.')
np.save(self.path_data + 'GWAS_unique_genes.npy', np.array(unique_genes))
def postprocessing_hits(self):
self.All_hits.drop(columns=['chrbp'], inplace=True)
self.All_hits.to_csv(self.path_data + 'GWAS_hits_' + self.target + '_All_withGenes.csv', index=False)
for organ in self.organs_XWAS:
Hits_organ = self.All_hits[self.All_hits['organ'] == organ].drop(columns=['organ'])
Hits_organ.to_csv(self.path_data + 'GWAS_hits_' + self.target + '_' + organ + '_withGenes.csv', index=False)
def summarize_results(self):
# Generate empty dataframe
organs = ['*', '*instances01', '*instances1.5x', '*instances23', 'Abdomen', 'AbdomenLiver', 'AbdomenPancreas',
'Arterial', 'ArterialCarotids', 'ArterialPulseWaveAnalysis', 'Biochemistry', 'BiochemistryBlood',
'BiochemistryUrine', 'Brain', 'BrainCognitive', 'BrainMRI', 'Eyes', 'EyesFundus', 'EyesOCT',
'Hearing', 'Heart', 'HeartECG', 'HeartMRI', 'ImmuneSystem', 'Lungs', 'Musculoskeletal',
'MusculoskeletalFullBody', 'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalScalars',
'MusculoskeletalSpine', 'PhysicalActivity']
cols = ['Organ', 'Sample size', 'SNPs', 'Genes', 'Heritability', 'CA_prediction_R2']
GWAS_summary = np.empty((len(organs), len(cols),))
GWAS_summary.fill(np.nan)
GWAS_summary = pd.DataFrame(GWAS_summary)
GWAS_summary.index = organs
GWAS_summary.columns = cols
GWAS_summary['Organ'] = organs
# Fill dataframe
All_hits = pd.read_csv(self.path_data + 'GWAS_hits_' + self.target + '_All_withGenes.csv')
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy
import pandas as pd
import tensorflow as tf
from PyEMD import CEEMDAN
import warnings
warnings.filterwarnings("ignore")
### import the libraries
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from math import sqrt
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
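# Small worked example (hypothetical input) of the sliding-window construction above:
# for dataset = [[1], [2], [3], [4], [5]] and look_back = 2, create_dataset returns
# dataX = [[1, 2], [2, 3]] and dataY = [3, 4]: each target is the value that follows its
# look_back-long window (the trailing -1 drops one otherwise usable window).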
def percentage_error(actual, predicted):
res = numpy.empty(actual.shape)
for j in range(actual.shape[0]):
if actual[j] != 0:
res[j] = (actual[j] - predicted[j]) / actual[j]
else:
res[j] = predicted[j] / numpy.mean(actual)
return res
def mean_absolute_percentage_error(y_true, y_pred):
return numpy.mean(numpy.abs(percentage_error(numpy.asarray(y_true), numpy.asarray(y_pred)))) * 100
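# Quick sanity check (hypothetical values) of the error metric above: when an actual value
# is 0, percentage_error divides by the mean of the actuals instead, so MAPE stays finite.
# mean_absolute_percentage_error([100, 0, 50], [90, 5, 55])
# -> |10/100|, |5/50|, |-5/50| = 0.1, 0.1, 0.1 -> mean * 100 = 10.0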
# In[25]:
def lr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import tensorflow as tf
numpy.random.seed(1234)
tf.random.set_seed(1234)
from sklearn.linear_model import LinearRegression
grid = LinearRegression()
grid.fit(X,y)
y_pred_train_lr= grid.predict(X)
y_pred_test_lr= grid.predict(X1)
y_pred_train_lr=pd.DataFrame(y_pred_train_lr)
y_pred_test_lr=pd.DataFrame(y_pred_test_lr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_lr= sc_y.inverse_transform (y_pred_test_lr)
y_pred_train1_lr=sc_y.inverse_transform (y_pred_train_lr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_lr)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_lr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_lr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_lr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_lr)
return mape,rmse,mae
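# Illustrative call (hypothetical data): the model functions below expect a single-column
# DataFrame, a window length and a train fraction, and return (MAPE, RMSE, MAE), e.g.
# series = pd.DataFrame(numpy.sin(numpy.linspace(0, 30, 400)))
# mape, rmse, mae = lr_model(series, look_back=4, data_partition=0.8)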
# In[26]:
def svr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.svm import SVR
grid = SVR()
grid.fit(X,y)
y_pred_train_svr= grid.predict(X)
y_pred_test_svr= grid.predict(X1)
y_pred_train_svr=pd.DataFrame(y_pred_train_svr)
y_pred_test_svr=pd.DataFrame(y_pred_test_svr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_svr= sc_y.inverse_transform (y_pred_test_svr)
y_pred_train1_svr=sc_y.inverse_transform (y_pred_train_svr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_svr=pd.DataFrame(y_pred_test1_svr)
y_pred_train1_svr=pd.DataFrame(y_pred_train1_svr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_svr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_svr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_svr)
return mape,rmse,mae
# In[27]:
def ann_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.neural_network import MLPRegressor
model= MLPRegressor(random_state=1,activation='tanh').fit(X,y)
numpy.random.seed(1234)
# make predictions
y_pred_train = model.predict(X)
y_pred_test = model.predict(X1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y1=pd.DataFrame(y1)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_test= sc_y.inverse_transform (y1)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[28]:
def rf_model(datass,look_back,data_partition,max_features):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train_rf= grid.predict(X)
y_pred_test_rf= grid.predict(X1)
y_pred_train_rf=pd.DataFrame(y_pred_train_rf)
y_pred_test_rf=pd.DataFrame(y_pred_test_rf)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_rf= sc_y.inverse_transform (y_pred_test_rf)
y_pred_train1_rf=sc_y.inverse_transform (y_pred_train_rf)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_rf)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_rf)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_rf)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_rf))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_rf)
return mape,rmse,mae
# In[29]:
def lstm_model(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
trainX1 = numpy.reshape(X, (X.shape[0],1,X.shape[1]))
testX1 = numpy.reshape(X1, (X1.shape[0],1,X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
neuron=neuron
model = Sequential()
model.add(LSTM(units = neuron,input_shape=(trainX1.shape[1], trainX1.shape[2])))
model.add(Dense(1))
optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(loss='mse',optimizer=optimizer)
# model.summary()
# Fitting the RNN to the Training set
model.fit(trainX1, y, epochs = epoch, batch_size = batch_size,verbose=0)
# make predictions
y_pred_train = model.predict(trainX1)
y_pred_test = model.predict(testX1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y1=pd.DataFrame(y1)
y_test= sc_y.inverse_transform (y1)
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
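# Note on shapes: the LSTM expects 3-D input (samples, timesteps, features); here each
# look_back-wide window is fed as a single timestep with look_back features, which is what
# the reshape of (n, look_back) into (n, 1, look_back) above produces.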
# In[30]:
###################################################hybrid based ceemdan####################################################
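# The hybrid models below follow a decompose-predict-aggregate scheme: CEEMDAN splits the
# raw series into intrinsic mode functions (IMFs), one regressor is trained per IMF on its
# own lagged windows, the per-IMF test predictions are summed to reconstruct the forecast,
# and the errors are then computed against the test split of the original series.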
def hybrid_ceemdan_rf(datass,look_back,data_partition,max_features):
import numpy as np
import pandas as pd
dfs=datass
s = dfs.values
emd = CEEMDAN(epsilon=0.05)
emd.noise_seed(12345)
IMFs = emd(s)
full_imf=pd.DataFrame(IMFs)
data_imf=full_imf.T
import pandas as pd
pred_test=[]
test_ori=[]
pred_train=[]
train_ori=[]
for col in data_imf:
datasetss2=pd.DataFrame(data_imf[col])
datasets=datasetss2.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train= grid.predict(X)
y_pred_test= grid.predict(X1)
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_train=pd.DataFrame(y_pred_train)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_pred_train1= sc_y.inverse_transform (y_pred_train)
pred_test.append(y_pred_test1)
test_ori.append(y_test)
pred_train.append(y_pred_train1)
train_ori.append(y_train)
result_pred_test= pd.DataFrame.from_records(pred_test)
result_pred_train= pd.DataFrame.from_records(pred_train)
a=result_pred_test.sum(axis = 0, skipna = True)
b=result_pred_train.sum(axis = 0, skipna = True)
dataframe=pd.DataFrame(dfs)
dataset=dataframe.values
train_size = int(len(dataset) * data_partition)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size], dataset[train_size:len(dataset)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
a= pd.DataFrame(a)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,a)
rmse= sqrt(mean_squared_error(y_test,a))
mae=metrics.mean_absolute_error(y_test,a)
return mape,rmse,mae
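# Illustrative call (hypothetical data and hyperparameters): unlike the single-model
# functions, the hybrid variants take a 1-D Series so that CEEMDAN can decompose it.
# series = pd.Series(numpy.sin(numpy.linspace(0, 30, 400)) + 0.1 * numpy.random.randn(400))
# mape, rmse, mae = hybrid_ceemdan_rf(series, look_back=4, data_partition=0.8, max_features=2)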
# In[31]:
def hybrid_ceemdan_lstm(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
from PyEMD import CEEMDAN
dfs=datass
s = dfs.values
emd = CEEMDAN(epsilon=0.05)
emd.noise_seed(12345)
IMFs = emd(s)
full_imf=pd.DataFrame(IMFs)
data_imf=full_imf.T
pred_test=[]
test_ori=[]
pred_train=[]
train_ori=[]
for col in data_imf:
datasetss2=pd.DataFrame(data_imf[col])
datasets=datasetss2.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)