#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 18 08:32:19 2021
Revised take: plot time series of deal amount for SEI/P2015 clusters (5 or 10) at the settlement level,
and then, on the right, a map with choropleths showing the mean/median value.
For this, I need to prepare the muni shapefile with RC in it.
@author: shlomi
"""
from MA_paths import work_david
from shapely.geometry import *
nadlan_path = work_david / 'Nadlan_deals'
apts = ['דירה', 'דירה בבית קומות']
muni_path = work_david/'gis/muni_il'
dis_dict = {}
dis_dict['ירושלים'] = 1
dis_dict['הצפון'] = 2
dis_dict['חיפה'] = 3
dis_dict['המרכז'] = 4
dis_dict['תל אביב'] = 5
dis_dict['הדרום'] = 6
dis_dict['יו"ש'] = 7
dis_en = {1: 'Jerusalem', 2: 'North', 3: 'Haifa',
4: 'Center', 5: 'Tel-Aviv', 6: 'South',
7: 'J&S'}
P2015_2_dict = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3,
6: 4,
7: 4,
8: 5,
9: 5,
10: 5}
P2015_2_name = {1: 'Very Peripheral',
2: 'Peripheral',
3: 'In Between',
4: 'Centralized',
5: 'Very Centralized'}
SEI2_dict = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3,
6: 3,
7: 4,
8: 4,
9: 5,
10: 5}
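# Note: P2015_2_dict and SEI2_dict collapse the 10 CBS periphery/SEI clusters into
# 5 coarser groups; P2015_2_name gives readable labels for the 5 periphery groups.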
def convert_df_variable_names(df, path=work_david, drop=True):
import pandas as pd
vlist = pd.read_csv(path/'nadlan_database_variable_list.csv', header=None)
vlist.columns = ['old', 'new', 'final']
vlist['final'] = vlist['final'].fillna(vlist['new'])
vlist.set_index('old', inplace=True)
di = vlist['final'].to_dict()
df = df.rename(di, axis=1)
df = df[[x for x in df.columns if 'to_drop' not in x]]
return df
def extract_JS_settelments_from_stat_areas(path=work_david, muni_path=muni_path):
from cbs_procedures import read_statistical_areas_gis_file
from cbs_procedures import read_bycode_city_data
import pandas as pd
import geopandas as gpd
st = read_statistical_areas_gis_file(path)
print('extracting JS big settlements...')
# J&S city codes from nadlan database:
js_cc = [3780, 3616, 3730, 3797, 3760, 3570, 3769, 3640, 3720,
3778]
# js_st = st[st['city_code'].isin(js_cc)]
ccs = st[st['city_code'].isin(js_cc)]['city_code'].unique()
js = st[st['city_code'].isin(ccs)]
sers = []
for cc in ccs:
cols = js[js['city_code']==cc].loc[:, ['city_code', 'NameHe', 'NameEn']]
ser = gpd.GeoSeries(js[js['city_code']==cc]['geometry'].unary_union)
ser['city_code'] = cols['city_code'].unique()[0]
ser['NameHe'] = cols['NameHe'].unique()[0]
ser['NameEn'] = cols['NameEn'].unique()[0]
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf = gpd.GeoDataFrame(sers, geometry='geometry')
geos_to_complete = [1, 2, 4, 8, 9]
city_codes_to_complete = [ccs[x] for x in geos_to_complete]
bycode = read_bycode_city_data(path)
names = [bycode.loc[x]['NameHe'] for x in city_codes_to_complete]
js = gpd.read_file(muni_path/'JS_plans.shp')
geos = [js[js['P_NAME']==x].geometry.unary_union for x in names]
sers = []
for i, geo in zip(geos_to_complete, geos):
ser = gpd.GeoSeries(geo)
ser['city_code'] = gdf.iloc[i]['city_code']
ser['NameHe'] = gdf.iloc[i]['NameHe']
ser['NameEn'] = gdf.iloc[i]['NameEn']
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf = gdf.drop(geos_to_complete, axis=0)
gdf1 = gpd.GeoDataFrame(sers, geometry='geometry')
gdf = pd.concat([gdf, gdf1], axis=0)
gdf['district'] = 'יו"ש'
return gdf
def prepare_just_city_codes_gis_areas(path=work_david, muni_path=muni_path):
import geopandas as gpd
import pandas as pd
from cbs_procedures import read_statistical_areas_gis_file
js = extract_JS_settelments_from_stat_areas(path, muni_path)
js = js.drop('district', axis=1)
js_ccs = js['city_code'].unique()
st = read_statistical_areas_gis_file(path)
ccs = st['city_code'].unique()
sers = []
ccs = [x for x in ccs if x not in js_ccs]
for cc in ccs:
geo = st[st['city_code'] == cc]['geometry'].unary_union
geo = remove_third_dimension(geo)
ser = gpd.GeoSeries(geo)
ser['city_code'] = cc
ser['NameHe'] = st[st['city_code'] == cc]['NameHe'].unique()[0]
ser['NameEn'] = st[st['city_code'] == cc]['NameEn'].unique()[0]
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf_cc = gpd.GeoDataFrame(sers, geometry='geometry')
gdf = pd.concat([gdf_cc, js], axis=0)
gdf = gdf.set_index('city_code')
filename = 'Municipal+J&S+city_code_level.shp'
gdf.to_file(muni_path/filename, encoding='cp1255', index=True, na_rep='None')
print('{} was saved to {}.'.format(filename, muni_path))
return gdf
def prepare_municiapal_level_and_RC_gis_areas(path=work_david, muni_path=muni_path):
import geopandas as gpd
import pandas as pd
js = extract_JS_settelments_from_stat_areas(path, muni_path)
muni = gpd.read_file(path/'gis/muni_il/muni_il.shp')
muni['city_code'] = pd.to_numeric(muni['CR_LAMAS'])
muni['Machoz'] = muni['Machoz'].str.replace('צפון', 'הצפון')
muni['Machoz'] = muni['Machoz'].str.replace('דרום', 'הדרום')
muni['Machoz'] = muni['Machoz'].str.replace('מרכז', 'המרכז')
muni_type_dict = {}
muni_type_dict['עירייה'] = 'City'
muni_type_dict['מועצה מקומית'] = 'LC'
muni_type_dict['מועצה אזורית'] = 'RC'
muni_type_dict['ללא שיפוט'] = 'NA'
muni_type_dict['מועצה מקומית תעשייתית'] = 'ILC'
muni['muni_type'] = muni['Sug_Muni'].map(muni_type_dict)
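# The regional-council (RC) code is taken from characters 2:4 of CR_PNIM, for RC rows only.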
muni['rc_code'] = muni[muni['muni_type'] ==
'RC']['CR_PNIM'].str[2:4].astype(int)
print('aggregating polygons to city/rc level...')
rc = muni[muni['muni_type'] == 'RC']
non_rc = muni[muni['muni_type'] != 'RC']
sers = []
for nrc in rc['rc_code'].unique():
geo = rc[rc['rc_code'] == nrc]['geometry'].unary_union
geo = remove_third_dimension(geo)
ser = gpd.GeoSeries(geo)
ser['rc_code'] = nrc
ser['NameHe'] = rc[rc['rc_code'] == nrc]['Muni_Heb'].unique()[0]
ser['NameEn'] = rc[rc['rc_code'] == nrc]['Muni_Eng'].unique()[0]
ser['district'] = rc[rc['rc_code'] == nrc]['Machoz'].unique()[0]
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf_rc = gpd.GeoDataFrame(sers, geometry='geometry')
sers = []
ccs = non_rc[~non_rc['city_code'].isnull()]['city_code'].unique()
for cc in ccs:
# print(cc)
geo = non_rc[non_rc['city_code'] == cc]['geometry'].unary_union
geo = remove_third_dimension(geo)
ser = gpd.GeoSeries(geo)
ser['city_code'] = cc
ser['NameHe'] = non_rc[non_rc['city_code'] == cc]['Muni_Heb'].unique()[
0]
ser['NameEn'] = non_rc[non_rc['city_code'] == cc]['Muni_Eng'].unique()[
0]
ser['district'] = non_rc[non_rc['city_code'] == cc]['Machoz'].unique()[
0]
ser = ser.rename({0: 'geometry'})
sers.append(ser)
gdf_nonrc = gpd.GeoDataFrame(sers, geometry='geometry')
gdf = pd.concat([gdf_rc, gdf_nonrc, js], axis=0)
gdf = gdf.reset_index(drop=True)
filename = 'Municipal+J&S+Regional.shp'
gdf.to_file(muni_path/filename, encoding='cp1255')
print('{} was saved to {}.'.format(filename, muni_path))
return gdf
def remove_third_dimension(geom):
if geom.is_empty:
return geom
if isinstance(geom, Polygon):
exterior = geom.exterior
new_exterior = remove_third_dimension(exterior)
interiors = geom.interiors
new_interiors = []
for interior in interiors:
new_interiors.append(remove_third_dimension(interior))
return Polygon(new_exterior, new_interiors)
elif isinstance(geom, LinearRing):
return LinearRing([xy[0:2] for xy in list(geom.coords)])
elif isinstance(geom, LineString):
return LineString([xy[0:2] for xy in list(geom.coords)])
elif isinstance(geom, Point):
return Point([xy[0:2] for xy in list(geom.coords)])
elif isinstance(geom, MultiPoint):
points = list(geom.geoms)
new_points = []
for point in points:
new_points.append(remove_third_dimension(point))
return MultiPoint(new_points)
elif isinstance(geom, MultiLineString):
lines = list(geom.geoms)
new_lines = []
for line in lines:
new_lines.append(remove_third_dimension(line))
return MultiLineString(new_lines)
elif isinstance(geom, MultiPolygon):
pols = list(geom.geoms)
new_pols = []
for pol in pols:
new_pols.append(remove_third_dimension(pol))
return MultiPolygon(new_pols)
elif isinstance(geom, GeometryCollection):
geoms = list(geom.geoms)
new_geoms = []
for g in geoms:
new_geoms.append(remove_third_dimension(g))
return GeometryCollection(new_geoms)
else:
raise RuntimeError("Currently this type of geometry is not supported: {}".format(type(geom)))
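# Example (illustrative sketch, not part of the original script): dropping the Z
# coordinate from a 3D polygon so it can be written to a 2D shapefile.
# >>> poly3d = Polygon([(0, 0, 5), (1, 0, 5), (1, 1, 5)])
# >>> remove_third_dimension(poly3d).has_z
# False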
def create_israel_districts(path=muni_path):
import geopandas as gpd
import pandas as pd
from shapely.geometry import MultiPolygon, Polygon, LineString
from shapely.ops import cascaded_union
muni = gpd.read_file(path/'muni_il.shp')
muni['Machoz'] = muni['Machoz'].str.replace('צפון', 'הצפון')
muni['Machoz'] = muni['Machoz'].str.replace('דרום', 'הדרום')
muni['Machoz'] = muni['Machoz'].str.replace('מרכז', 'המרכז')
dists = muni['Machoz'].unique()
sers = []
for dis in dists:
print(dis)
# print(dis)
geo = muni[muni['Machoz'] == dis].geometry.unary_union
if isinstance(geo, MultiPolygon):
eps = 0.01
omega = cascaded_union([
Polygon(component.exterior).buffer(eps).buffer(-eps) for component in geo
])
geo = omega[0]
geo = remove_third_dimension(geo)
ser = gpd.GeoSeries(geo)
ser = ser.rename({0: 'geometry'})
# print(type(ser))
ser['district'] = dis
ser['district_EN'] = dis_en[dis_dict[dis]]
ser['district_code'] = dis_dict[dis]
bound = ser['geometry'].boundary
if not isinstance(bound, LineString):
ser['geometry'] = Polygon(bound[0])
# ser['geometry'] = ser['geometry'].simplify(0.1)
# ser.crs = muni.crs
sers.append(ser)
# now add J&S:
js = gpd.read_file(path/'J&S_matakim.geojson')
js = js.to_crs(2039)
js1 = gpd.GeoSeries(js.geometry.unary_union)
js1 = js1.rename({0: 'geometry'})
js1['district'] = 'יו"ש'
js1['district_EN'] = 'J&S'
js1['district_code'] = 7
js1 = gpd.GeoDataFrame([js1])
b = js1.geometry.boundary.values[0]
js1['geometry'] = Polygon(b[0])
js1.index = [6]
# sers.append(js1)
dgf = gpd.GeoDataFrame(sers, geometry='geometry', crs=muni.crs)
dgf = pd.concat([dgf, js1], axis=0)
dgf = dgf.rename(
{'district': 'NameHe', 'district_EN': 'NameEn', 'district_code': 'Code'}, axis=1)
dgf.geometry = dgf.geometry.simplify(10)
filename = 'Israel_districts_incl_J&S.shp'
dgf.to_file(path/filename)
print('{} was saved to {}.'.format(filename, path))
return dgf
def create_higher_group_category(df, existing_col='SEI_cluster', n_groups=2,
new_col='SEI2_cluster', names=None):
import pandas as pd
lower_group = sorted(df[existing_col].dropna().unique())
# group consecutive clusters into chunks of size n_groups (e.g. 10 SEI clusters -> 5 pairs)
new_group = [lower_group[i:i+n_groups]
for i in range(0, len(lower_group), n_groups)]
new_dict = {}
if names is not None:
assert len(names) == len(new_group)
for i, item in enumerate(new_group):
if names is not None:
new_dict[names[i]] = new_group[i]
else:
new_dict[i+1] = new_group[i]
m = pd.Series(new_dict).explode().sort_values()
d = {x: y for (x, y) in zip(m.values, m.index)}
df[new_col] = df[existing_col].map(d)
return df
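# Example (illustrative sketch, assumes pandas imported as pd): with the pair-wise grouping
# used for SEI2_cluster, a toy frame with SEI_cluster 1..10 is mapped onto 5 groups,
# matching SEI2_dict defined above.
# >>> toy = pd.DataFrame({'SEI_cluster': range(1, 11)})
# >>> create_higher_group_category(toy, existing_col='SEI_cluster', n_groups=2)['SEI2_cluster'].tolist()
# [1, 1, 2, 2, 3, 3, 4, 4, 5, 5]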
# def geolocate_nadlan_deals_within_city_or_rc(df, muni_path=muni_path,
# savepath=work_david):
# import geopandas as gpd
# import pandas as pd
# # run load_nadlan_combined_deal with return_XY and without add_geo_layers:
# gdf = gpd.read_file(muni_path/'Municipal+J&S+Regional.shp')
# print('geolocating nadlan deals within city or RC...')
# total = gdf.index.size
# keys = []
# for i, row in gdf.iterrows():
# print('index: {} / {}'.format(i, total))
# within = df.geometry.within(row['geometry'])
# if within.sum() == 0:
# print('no deals found in {}'.format(row['NameHe']))
# continue
# inds = df.loc[within].index
# dff = pd.DataFrame(df.loc[inds, 'KEYVALUE'])
# dff['muni_gdf_index'] = i
# keys.append(dff)
# filename = 'Muni_gdf_KEYVALUE_index.csv'
# dff = pd.concat(keys, axis=0)
# dff.to_csv(savepath/filename, na_rep='None')
# print('Done!')
# return dff
def load_nadlan_combined_deal(path=work_david, times=['1998Q1', '2021Q1'],
dealamount_iqr=2, return_XY=False, add_bgr=None,
add_geo_layers=False, add_mean_salaries=False,
rename_vars=True, agg_rooms_345=True):
import pandas as pd
from Migration_main import path_glob
import geopandas as gpd
from cbs_procedures import read_statistical_areas_gis_file
import numpy as np
from cbs_procedures import read_mean_salary
def add_bgr_func(grp, bgr, rooms='Total'):
import numpy as np
cc_as_str = str(grp['city_code'].unique()[0])
try:
gr = bgr.loc[cc_as_str][rooms]
except KeyError:
gr = np.nan
grp['Building_Growth_Rate'] = gr
return grp
def add_stat_area_func(grp, stat_gdf):
city_code11 = grp['city_stat_code'].unique()[0]
geo = stat_gdf[stat_gdf['city_stat11']==city_code11].geometry.item()
grp['stat_geo'] = [geo]*len(grp)
return grp
def add_district_area_func(grp, dis_df):
district_code = grp['district_code'].unique()[0]
geo = dis_df[dis_df['Code']==district_code].geometry.item()
grp['district_geo'] = [geo]*len(grp)
return grp
def add_mean_salary_func(grp, sal):
year = grp['year'].unique()[0]
salary = sal[sal['year']==year]['mean_salary'].item()
grp['mean_salary'] = [salary]*len(grp)
return grp
file = path_glob(
path, 'Nadlan_deals_neighborhood_combined_processed_*.csv')[0]
print(file)
dtypes = {'FULLADRESS': 'object', 'Street': 'object', 'FLOORNO': float,
'NEWPROJECTTEXT': bool, 'PROJECTNAME': 'object', 'DEALAMOUNT': float}
df = pd.read_csv(file, na_values='None', parse_dates=['DEALDATETIME'],
dtype=dtypes)
# filter nans:
# df = df[~df['district'].isnull()]
if times is not None:
print('Slicing to times {} to {}.'.format(*times))
# df = df[df['year'].isin(np.arange(years[0], years[1] + 1))]
df = df.set_index('DEALDATETIME')
df = df.loc[times[0]:times[1]]
df = df.reset_index()
if dealamount_iqr is not None:
print('Filtering DEALAMOUNT with IQR of {}.'.format(dealamount_iqr))
df = df[~df.groupby('year')['DEALAMOUNT'].apply(
is_outlier, method='iqr', k=dealamount_iqr)]
df = df.reset_index(drop=True)
# print('loading gdf muni index...')
df['P2015_cluster2'] = df['P2015_cluster'].map(P2015_2_dict)
if return_XY:
inds = df[df['X'] == 0].index
df.loc[inds, 'X'] = np.nan
inds = df[df['Y'] == 0].index
df.loc[inds, 'Y'] = np.nan
df = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df['X'], df['Y']))
if add_mean_salaries:
print('adding mean salaries.')
sal = read_mean_salary()
df = df.groupby('year').apply(add_mean_salary_func, sal)
df['MSAL_per_ASSET'] = (df['DEALAMOUNT'] / df['mean_salary']).round()
if add_bgr is not None:
print('Adding Building Growth rate.')
file = path_glob(path, 'Building_*_growth_rate_*.csv')[0]
bgr = pd.read_csv(file, na_values='None', index_col='ID')
df = df.groupby('city_code').apply(add_bgr_func, bgr, rooms=add_bgr)
# treat a zero growth rate as missing
df.loc[df['Building_Growth_Rate'] == 0, 'Building_Growth_Rate'] = np.nan
if add_geo_layers:
print('adding statistical area geometry')
stat_gdf = read_statistical_areas_gis_file(path)
df = df.groupby('city_stat_code').apply(add_stat_area_func, stat_gdf)
print('adding district area geometry')
dis_df = gpd.read_file(path/'gis/muni_il/Israel_districts_incl_J&S.shp')
df['district_code'] = df['district'].map(dis_dict)
df = df.groupby('district_code').apply(add_district_area_func, dis_df)
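# Collapse ASSETROOMNUM into discrete 3/4/5-room categories (values outside [3, 6) stay NaN).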
if agg_rooms_345:
inds = df.loc[(df['ASSETROOMNUM']>=3) & (df['ASSETROOMNUM']<4)].index
df.loc[inds, 'Rooms_345'] = 3
inds = df.loc[(df['ASSETROOMNUM']>=4) & (df['ASSETROOMNUM']<5)].index
df.loc[inds, 'Rooms_345'] = 4
inds = df.loc[(df['ASSETROOMNUM']>=5) & (df['ASSETROOMNUM']<6)].index
df.loc[inds, 'Rooms_345'] = 5
df['Rooms_345'] = df['Rooms_345'].astype(pd.Int64Dtype()).astype('category')
if rename_vars:
print('renaming vars.')
var_names = pd.read_excel(path/'nadlan_database_variable_list.xls', header=None)
var_di = dict(zip(var_names[0], var_names[1]))
df = df.rename(var_di, axis=1)
return df
def load_nadlan_deals(path=work_david, csv=True,
times=['1998Q1', '2021Q1'], dealamount_iqr=2,
fix_new_status=True, add_SEI2_cluster=True,
add_peripheri_data=True, add_bycode_data=True
):
import pandas as pd
import numpy as np
from Migration_main import path_glob
from cbs_procedures import read_periphery_index
from cbs_procedures import read_bycode_city_data
if csv:
file = path_glob(path, 'Nadlan_deals_processed_*.csv')
dtypes = {'FULLADRESS': 'object', 'Street': 'object', 'FLOORNO': 'object',
'NEWPROJECTTEXT': 'object', 'PROJECTNAME': 'object', 'DEALAMOUNT': float}
df = pd.read_csv(file[0], na_values='None', parse_dates=['DEALDATETIME'],
dtype=dtypes)
else:
file = path_glob(path, 'Nadlan_deals_processed_*.hdf')
df = pd.read_hdf(file)
df['year'] = df['DEALDATETIME'].dt.year
df['month'] = df['DEALDATETIME'].dt.month
df['quarter'] = df['DEALDATETIME'].dt.quarter
df['YQ'] = df['year'].astype(str) + 'Q' + df['quarter'].astype(str)
if times is not None:
print('Slicing to times {} to {}.'.format(*times))
# df = df[df['year'].isin(np.arange(years[0], years[1] + 1))]
df = df.set_index('DEALDATETIME')
df = df.loc[times[0]:times[1]]
df = df.reset_index()
if dealamount_iqr is not None:
print('Filtering DEALAMOUNT with IQR of {}.'.format(dealamount_iqr))
df = df[~df.groupby('year')['DEALAMOUNT'].apply(
is_outlier, method='iqr', k=dealamount_iqr)]
if fix_new_status:
inds = df.loc[(df['Age'] < 0) & (df['Age'] > -5)].index
df.loc[inds, 'New'] = True
df['NEWPROJECTTEXT'] = pd.to_numeric(df['NEWPROJECTTEXT']).fillna(0)
df['NEWPROJECTTEXT'] = df['NEWPROJECTTEXT'].astype(bool)
if add_SEI2_cluster:
SEI_cluster = [x+1 for x in range(10)]
new = [SEI_cluster[i:i+2] for i in range(0, len(SEI_cluster), 2)]
SEI2 = {}
for i, item in enumerate(new):
SEI2[i+1] = new[i]
m = pd.Series(SEI2).explode().sort_values()
d = {x: y for (x, y) in zip(m.values, m.index)}
df['SEI2_cluster'] = df['SEI_cluster'].map(d)
if add_peripheri_data:
pdf = read_periphery_index()
cols = ['TLV_proximity_value', 'TLV_proximity_rank', 'PAI_value',
'PAI_rank', 'P2015_value', 'P2015_rank', 'P2015_cluster']
dicts = [pdf[x].to_dict() for x in cols]
series = [df['city_code'].map(x) for x in dicts]
pdf1 = pd.concat(series, axis=1)
pdf1.columns = cols
df = pd.concat([df, pdf1], axis=1)
if add_bycode_data:
bdf = read_bycode_city_data()
cols = ['district', 'district_EN', 'region', 'natural_area']
dicts = [bdf[x].to_dict() for x in cols]
series = [df['city_code'].map(x) for x in dicts]
bdf1 = pd.concat(series, axis=1)
## for data
import pandas as pd
import numpy as np
import requests
import json
import os
from datetime import datetime, date
from dotenv import load_dotenv
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
## for plotting
import matplotlib.pyplot as plt
import matplotlib.patches as pltpatches
## for stationarity test
import statsmodels.api as sm
## for outliers detection, models tuning, clustering
from sklearn import preprocessing, svm, model_selection, metrics, cluster
## for autoregressive models
import pmdarima
import statsmodels.tsa.api as smt
import arch
import tensorflow as tf
## for deep learning
from tensorflow.python.keras import models, layers, preprocessing as kprocessing
## for prophet
from fbprophet import Prophet
pd.plotting.register_matplotlib_converters()
## for parametric fit and resistence/support
from scipy import optimize, stats, signal, cluster as sci_cluster
## for clustering
from tslearn.metrics import dtw
from tslearn.utils import to_time_series_dataset
from tslearn.clustering import TimeSeriesKMeans
###############################################################################
# TS ANALYSIS #
###############################################################################
def get_data_api_toTs(ini,coin):
coin_url = os.getenv(coin.upper()+"_HISTOHOUR")
if ini == 0 :
request = requests.get(coin_url)
else:
request = requests.get(coin_url+f"&toTs={ini}")
todo = json.loads(request.content)
return todo['Data']['Data']
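# Note: each call returns one page of hourly candles; passing the earliest 'time' value
# back in via the toTs query parameter pages the history backwards (see get_data_df below).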
def convertToDF(dfJSON):
return(pd.json_normalize(dfJSON))
'''
Get cryptocurrency dataset.
:parameter
:param coin: coin name (BTC, ETH or XRP)
:param researches: number of API pages to fetch (each page holds 2001 hourly observations)
'''
def get_data_df(coin,researches):
load_dotenv()
data = get_data_api_toTs(0,coin)
df_aux = convertToDF(data)
for x in range(researches-1):
ini = df_aux['time'][0]
print("Buscando dados de : ",datetime.fromtimestamp(ini))
data1=get_data_api_toTs(ini,coin)
df_aux1 = convertToDF(data1)
df_aux = df_aux1.append(df_aux,ignore_index=True)
return df_aux
'''
Get cryptocurrency dataset.
:parameter
:param coin: coin name (BTC, ETH or XRP)
:param sample_data: get sample data from api? (True or False)
'''
def get_data(coin, sample_data=True):
if coin.upper() not in ('BTC', 'ETH', 'XRP'):
err_msg = coin + ' is an invalid coin!'
raise ValueError(err_msg)
name_coin = "_SAMPLE_DATA" if sample_data else "_ALL_DATA"
name_coin = coin.upper() + name_coin
print("\nBuscando ", "amostra" if sample_data else "todas",
" observações da moeda", coin.upper())
load_dotenv()
coin_url = os.getenv(name_coin)
request = requests.get(coin_url)
data = json.loads(request.content)
content = data.get("Data")
content = content.get("Data")
print("Dataset foi carregado! Formatando Dataset ...")
df = pd.json_normalize(content[0])
for i in range(1, len(content)):
observation = content[i]
df_temp = pd.json_normalize(observation)
df = pd.DataFrame.append(df, df_temp)
return df
'''
Plot ts with rolling mean and 95% confidence interval with rolling std.
:parameter
:param ts: pandas Series
:param window: num for rolling stats
:param plot_intervals: bool - if True plots the conf interval
:param plot_ma: bool - if True plots the moving avg
'''
def plot_ts(ts, plot_ma=True, plot_intervals=True, window=30, figsize=(15,5)):
rolling_mean = ts.rolling(window=window).mean()
rolling_std = ts.rolling(window=window).std()
plt.figure(figsize=figsize)
plt.title(ts.name)
plt.plot(ts[window:], label='ts', color="black")
if plot_ma:
plt.plot(rolling_mean, 'g', label='MA'+str(window), color="red")
if plot_intervals:
lower_bound = rolling_mean - (1.96 * rolling_std)
upper_bound = rolling_mean + (1.96 * rolling_std)
plt.fill_between(x=ts.index, y1=lower_bound, y2=upper_bound, color='lightskyblue', alpha=0.4)
plt.legend(loc='best')
plt.grid(True)
plt.show()
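# Example (illustrative sketch): plot a price series with a 30-observation moving average
# and its 95% rolling band; 'close' and 'time' are assumed column names from the API dataframe.
# >>> plot_ts(df.set_index('time')['close'], plot_ma=True, plot_intervals=True, window=30)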
'''
Fit a parametric trend line.
:parameter
:param ts: pandas Series
:param degree: polynomial order, ex. if 1 --> trend line = constant + slope*x, if 2 --> trend line = constant + a*x + b*x^2
'''
def fit_trend(ts, degree=1, plot=True, figsize=(15,5)):
## fit trend
dtf = ts.to_frame(name="ts")
params = np.polyfit(ts.reset_index().index, ts.values, deg=degree)
constant = params[-1]
dtf["trend"] = constant
X = np.array(range(1,len(ts)+1))
for i in range(1,degree+1):
dtf["trend"] = dtf["trend"] + params[i-1]*(X**i)
## plot
if plot is True:
ax = dtf.plot(grid=True, title="Fitting Trend", figsize=figsize, color=["black","red"])
ax.set(xlabel=None)
plt.show()
return dtf, params
'''
Fit a parametric trend poly.
:parameter
:param ts: pandas Series
:param degree: polynomial order, ex. if 2 --> trend line = constant + a*x + b*x^2 ...
'''
def fit_poly(ts_train, ts_test, degree=2, plot=True, figsize=(6,6)):
ts = ts_train.append(ts_test)
x = ts.reset_index().index
y = ts.values
params = np.polyfit(x, y,degree)
poly1d_fn = np.poly1d(params)
y_pred = poly1d_fn(x)
ts_plot = ts.reset_index()
poly = pd.DataFrame({'forecast': y_pred, 'x': ts.reset_index()['date'], 'ts': ts_plot['sales']})
## plot
if plot is True:
plt.figure(figsize=figsize)
es_ts = poly[["x","ts"]]
es_fc = poly[["x","forecast"]]
print(es_fc)
plt.plot(es_ts['x'], es_ts['ts'], color="black", label="Historical")
plt.plot(es_fc['x'], es_fc['forecast'], color="green", label="Training")
plt.xlabel("Date")
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
if degree > 1 :
plt.savefig('regressao_polinomial_train.png', format='png', bbox_inches='tight')
else:
plt.savefig('regressao_linear_train.png', format='png', bbox_inches='tight')
plt.show()
print('Figure saved!')
plt.figure(figsize=figsize)
first_idx = poly[pd.notnull(poly["forecast"])].index[0]
first_loc = poly.index.tolist().index(first_idx)
zoom_idx = poly.index[first_loc-len(ts_test)]
es_ts = poly.loc[zoom_idx:][["x","ts"]]
es_fc = poly.loc[zoom_idx:][["x","forecast"]]
plt.plot(es_ts['x'], es_ts['ts'], color="black", label="Historical")
plt.plot(es_fc['x'], es_fc['forecast'], color="green", label="Test")
plt.xlabel("Date")
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
if degree > 1 :
plt.savefig('regressao_polinomial_test.png', format='png', bbox_inches='tight')
else:
plt.savefig('regressao_linear_test.png', format='png', bbox_inches='tight')
plt.show()
print('Figure saved!')
d = y - y_pred
mape = np.mean(np.abs(d / y)) * 100
mse = np.mean(d**2)
mae = np.mean(abs(d))
rmse = np.sqrt(mse)
print("Results by manual calculation: Treinamento")
print("MAPE:%.4f" %mape,"%")
print("MAE:%.4f" %mae)
print("MSE:%.4f" %mse)
print("RMSE:%.4f" %rmse)
es_ts = poly.loc[zoom_idx:][["x","ts"]]
es_fc = poly.loc[zoom_idx:][["x","forecast"]]
poly["error"] = es_ts["ts"] - es_fc["forecast"]
poly["error_pct"] = poly["error"] / es_ts["ts"]
### kpi
error_mean = poly["error"].mean()
error_std = poly["error"].std()
mae = poly["error"].apply(lambda x: np.abs(x)).mean() #mean absolute error
mape = poly["error_pct"].apply(lambda x: np.abs(x)).mean() *100 #mean absolute error %
mse = poly["error"].apply(lambda x: x**2).mean() #mean squared error
rmse = np.sqrt(mse) #root mean squared error
print("Results by manual calculation Teste:")
print("MAPE:%.4f" %mape,"%")
print("MAE:%.4f" %mae)
print("MSE:%.4f" %mse)
print("RMSE:%.4f" %rmse)
'''
Difference the ts.
:parameter
:param ts: pandas Series
:param lag: num - diff[t] = y[t] - y[t-lag]
:param order: num - how many times to difference: diff[t]^order = diff[t] - diff[t-lag]
:param drop_na: logic - if True Na are dropped, else are filled with last observation
'''
def diff_ts(ts, lag=1, order=1, drop_na=True):
for i in range(order):
ts = ts - ts.shift(lag)
ts = ts[(pd.notnull(ts))] if drop_na is True else ts.fillna(method="bfill")
return ts
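# Example (illustrative sketch): first-order differencing with lag 1 turns a linear ramp
# into a constant series.
# >>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
# >>> diff_ts(s, lag=1, order=1, drop_na=True).tolist()
# [1.0, 1.0, 1.0]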
'''
Find outliers using sklearn's unsupervised one-class support vector machine.
:parameter
:param ts: pandas Series
:param perc: float - percentage of outliers to look for
:return
dtf with raw ts, outlier 1/0 (yes/no), numeric index
'''
def find_outliers(ts, perc=0.01, figsize=(6,6)):
## fit svm
scaler = preprocessing.StandardScaler()
ts_scaled = scaler.fit_transform(ts.values.reshape(-1,1))
model = svm.OneClassSVM(nu=perc, kernel="rbf", gamma=0.01)
model.fit(ts_scaled)
## dtf output
dtf_outliers = ts.to_frame(name="ts")
dtf_outliers["outlier"] = model.predict(ts_scaled)
dtf_outliers["outlier"] = dtf_outliers["outlier"].apply(lambda x: 1 if x == -1 else 0)
## plot
fig, ax = plt.subplots(figsize=figsize)
ax.set(title="Outliers detection: found "+str(sum(dtf_outliers["outlier"] == 1)))
ax.plot(dtf_outliers.index, dtf_outliers["ts"], color="black")
ax.scatter(x=dtf_outliers[dtf_outliers["outlier"]==1].index, y=dtf_outliers[dtf_outliers["outlier"]==1]['ts'], color='red')
ax.grid(True)
plt.show()
return dtf_outliers
'''
Interpolate outliers in a ts.
'''
def remove_outliers(ts, outliers_idx, figsize=(6,6)):
ts_clean = ts.copy()
ts_clean.loc[outliers_idx] = np.nan
ts_clean = ts_clean.interpolate(method="linear")
ax = ts.plot(figsize=figsize, color="red", alpha=0.5, label="Historical", legend=True)
ts_clean.plot(ax=ax, grid=True, color="black", label="Interpolated", legend=True)
ax.set(xlabel=None)
plt.xlabel("Data")
plt.ylabel("US$")
plt.legend()
plt.savefig('remocao_outliers.png', format='png', bbox_inches='tight')
plt.show()
return ts_clean
'''
Finds maxes, mins, resistance and support levels.
:parameter
:param ts: pandas Series
:param window: int - rolling window
:param trend: bool - False if ts is flat
:return
dtf with raw ts, max, min, resistence, support
'''
def resistence_support(ts, window=30, trend=False, plot=True, figsize=(15,5)):
dtf = ts.to_frame(name="ts")
dtf["max"], dtf["min"] = [np.nan, np.nan]
rolling = dtf['ts'].rolling(window=window).mean().dropna()
## maxs
local_max = signal.argrelextrema(rolling.values, np.greater)[0]
local_max_idx = [dtf.iloc[i-window:i+window]['ts'].idxmax() for i in local_max if (i > window) and (i < len(dtf)-window)]
dtf["max"].loc[local_max_idx] = dtf["ts"].loc[local_max_idx]
## mins
local_min = signal.argrelextrema(rolling.values, np.less)[0]
local_min_idx = [dtf.iloc[i-window:i+window]['ts'].idxmin() for i in local_min if (i > window) and (i < len(dtf)-window)]
dtf["min"].loc[local_min_idx] = dtf["ts"].loc[local_min_idx]
## resistence/support
dtf["resistence"] = dtf["max"].interpolate(method="linear") if trend is True else dtf["max"].fillna(method="ffill")
dtf["support"] = dtf["min"].interpolate(method="linear") if trend is True else dtf["min"].fillna(method="ffill")
## plot
if plot is True:
ax = dtf["ts"].plot(color="black", figsize=figsize, grid=True)
dtf["resistence"].plot(ax=ax, color="darkviolet", label="resistence", grid=True, linestyle="--")
dtf["support"].plot(ax=ax, color="green", label="support", grid=True, linestyle="--")
ax.scatter(x=dtf["max"].index, y=dtf["max"].values, color="darkviolet", label="max")
ax.scatter(x=dtf["min"].index, y=dtf["min"].values, color="green", label="min")
ax.set(xlabel=None)
ax.legend()
plt.show()
return dtf
###############################################################################
# MODEL DESIGN & TESTING - FORECASTING #
###############################################################################
'''
Split train/test from any given data point.
:parameter
:param ts: pandas Series
:param exog: array len(ts) x n regressors
:param test: num or str - test size (ex. 0.20) or index position (ex. "yyyy-mm-dd", 1000)
:return
ts_train, ts_test, exog_train, exog_test
'''
def split_train_test(ts, exog=None, test=0.20, plot=True, figsize=(6,6)):
## define splitting point
if type(test) is float:
split = int(len(ts)*(1-test))
perc = test
elif type(test) is str:
split = ts.reset_index()[ts.reset_index().iloc[:,0]==test].index[0]
perc = round(len(ts[split:])/len(ts), 2)
else:
split = test
perc = round(len(ts[split:])/len(ts), 2)
print("--- splitting at index: ", split, "|", ts.index[split], "| test size:", perc, " ---")
## split ts
ts_train = ts.head(split)
ts_test = ts.tail(len(ts)-split)
upper_bound = max(ts) * 1.05
lower_bound = min(ts) * 1.05
if plot is True:
ts_train.plot(grid=True, title="", color="black")
plt.xlabel('Date')
plt.ylabel('US$')
plt.savefig('dados_treino.png', format='png', bbox_inches='tight')
plt.show()
ts_test.plot(grid=True, title="", color="black")
plt.xlabel('Date')
plt.ylabel('US$')
plt.savefig('dados_teste.png', format='png', bbox_inches='tight')
plt.show()
## split exog
if exog is not None:
exog_train = exog[0:split]
exog_test = exog[split:]
return ts_train, ts_test, exog_train, exog_test
else:
return ts_train, ts_test
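# Example (illustrative sketch): hold out the last 20% of observations for testing.
# >>> ts_train, ts_test = split_train_test(ts, test=0.20, plot=False)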
'''
Compute the confidence interval for predictions:
[y[t+h] +- (c*σ*√h)]
:parameter
:param lst_values: list or array
:param error_std: σ (standard dev of residuals)
:param conf: num - confidence level (90%, 95%, 99%)
:return
array with 2 columns (upper and lower bounds)
'''
def utils_conf_int(lst_values, error_std, conf=0.95):
lst_values = list(lst_values) if type(lst_values) != list else lst_values
c = round( stats.norm.ppf(1-(1-conf)/2), 2)
lst_ci = []
for x in lst_values:
lst_x = lst_values[:lst_values.index(x)+1]
h = len(lst_x)
ci = [x - (c*error_std*np.sqrt(h)), x + (c*error_std*np.sqrt(h))]
lst_ci.append(ci)
return np.array(lst_ci)
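# Example (illustrative sketch): with sigma=10 and conf=0.95 (c=1.96), the first forecast
# (horizon h=1) gets a half-width of 1.96*10*sqrt(1) = 19.6.
# >>> utils_conf_int([100, 101, 102], error_std=10, conf=0.95)[0]
# array([ 80.4, 119.6])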
'''
Evaluation metrics for predictions.
:parameter
:param dtf: DataFrame with columns "ts", "model", "forecast", and "lower"/"upper" (if available)
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper", "error"
'''
def utils_evaluate_ts_model(dtf, conf=0.95, title=None, plot=True, figsize=(20,13)):
try:
## residuals from fitting
### add column
dtf["residuals"] = dtf["ts"] - dtf["model"]
### kpi
residuals_mean = dtf["residuals"].mean()
residuals_std = dtf["residuals"].std()
## Model error
### add column
dtf["model_error_pct"] = dtf["residuals"] / dtf["ts"]
### kpi
model_error_mean = dtf["residuals"].mean()
model_error_std = dtf["residuals"].std()
model_mae = dtf["residuals"].apply(lambda x: np.abs(x)).mean() #mean absolute error
model_mape = dtf["model_error_pct"].apply(lambda x: np.abs(x)).mean() #mean absolute error %
model_mse = dtf["residuals"].apply(lambda x: x**2).mean() #mean squared error
model_rmse = np.sqrt(model_mse) #root mean squared error
## forecasting error
### add column
dtf["error"] = dtf["ts"] - dtf["forecast"]
dtf["error_pct"] = dtf["error"] / dtf["ts"]
### kpi
error_mean = dtf["error"].mean()
error_std = dtf["error"].std()
mae = dtf["error"].apply(lambda x: np.abs(x)).mean() #mean absolute error
mape = dtf["error_pct"].apply(lambda x: np.abs(x)).mean() #mean absolute error %
mse = dtf["error"].apply(lambda x: x**2).mean() #mean squared error
rmse = np.sqrt(mse) #root mean squared error
## interval
if "upper" not in dtf.columns:
print("--- computing confidence interval ---")
dtf["lower"], dtf["upper"] = [np.nan, np.nan]
dtf.loc[dtf["forecast"].notnull(), ["lower","upper"]] = utils_conf_int(
dtf[dtf["forecast"].notnull()]["forecast"], residuals_std, conf)
## plot
if plot is True:
plt.figure(figsize=figsize)
### training
ts = dtf[pd.notnull(dtf["model"])][["ts"]]
print(ts.reset_index().head())
model = dtf[pd.notnull(dtf["model"])][["model"]]
print(model.reset_index().head())
plt.plot(ts, color='black', label='Historical')
plt.plot(model, color='green', label='Training')
plt.xlabel("Date")
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
plt.savefig(title+'treinamento.png', format='png', bbox_inches='tight')
plt.show()
print('\nFigure saved!\n')
### testing
plt.figure(figsize=figsize)
ts = dtf[pd.isnull(dtf["model"])][["ts"]]
forecast = dtf[pd.isnull(dtf["model"])][["forecast"]]
plt.plot(ts, color='black', label='Historical')
plt.plot(forecast, color='green', label='Test')
plt.xlabel("Date")
plt.fill_between(x=dtf.index, y1=dtf['lower'], y2=dtf['upper'], color='b', alpha=0.2)
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
plt.savefig(title+'teste.png', format='png', bbox_inches='tight')
plt.show()
print('\nFigure saved!\n')
print("Training --> Residuals mean:", np.round(residuals_mean), " | std:", np.round(residuals_std),
" | mae:",np.round(model_mae), " | mape:",np.round(model_mape*100), "% | mse:",np.round(model_mse), " | rmse:",np.round(model_rmse))
print("Test --> Error mean:", np.round(error_mean), " | std:", np.round(error_std),
" | mae:",np.round(mae), " | mape:",np.round(mape*100), "% | mse:",np.round(mse), " | rmse:",np.round(rmse))
return dtf[["ts", "model", "residuals", "lower", "forecast", "upper", "error"]]
except Exception as e:
print("--- got error ---")
print(e)
'''
Generate dates to index predictions.
:parameter
:param start: str - "yyyy-mm-dd"
:param end: str - "yyyy-mm-dd"
:param n: num - length of index
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
'''
def utils_generate_indexdate(start, end=None, n=None, freq="D"):
if end is not None:
index = pd.date_range(start=start, end=end, freq=freq)
else:
index = pd.date_range(start=start, periods=n, freq=freq)
index = index[1:]
print("--- generating index date --> start:", index[0], "| end:", index[-1], "| len:", len(index), "---")
return index
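# Example (illustrative sketch): n=8 daily periods from a known start; the first generated
# date (the start itself) is dropped, leaving 7 future dates, 2021-01-02 through 2021-01-08.
# >>> idx = utils_generate_indexdate(start="2021-01-01", n=8, freq="D")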
'''
Plot unknown future forecast and produce conf_int with residual_std and pred_int if an error_std is given.
:parameter
:param dtf: DataFrame with columns "ts", "model", "forecast", and "lower"/"upper" (if available)
:param conf: num - confidence level (90%, 95%, 99%)
:param zoom: int - plots the focus on the last zoom days
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper" (No error)
'''
def utils_add_forecast_int(dtf, conf=0.95, plot=True, zoom=30, figsize=(6,6), title=None):
## residuals from fitting
### add column
dtf["residuals"] = dtf["ts"] - dtf["model"]
### kpi
residuals_std = dtf["residuals"].std()
## interval
if "upper" not in dtf.columns:
print("--- computing confidence interval ---")
dtf["lower"], dtf["upper"] = [np.nan, np.nan]
dtf.loc[dtf["forecast"].notnull(), ["lower","upper"]] = utils_conf_int(
dtf[dtf["forecast"].notnull()]["forecast"], residuals_std, conf)
## plot
if plot is True:
plt.figure(figsize=figsize)
### entire series
es_ts = dtf[["ts"]]
es_fc = dtf[["forecast"]]
plt.plot(es_ts, color="black", label="Historical")
plt.plot(es_fc, color="red", label="Forecast")
plt.xlabel("Date")
plt.fill_between(x=dtf.index, y1=dtf['lower'], y2=dtf['upper'], color='b', alpha=0.2)
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
plt.savefig(title+'_entire_series.png', format='png', bbox_inches='tight')
plt.show()
### focus on last
plt.figure(figsize=figsize)
first_idx = dtf[pd.notnull(dtf["forecast"])].index[0]
first_loc = dtf.index.tolist().index(first_idx)
zoom_idx = dtf.index[first_loc-zoom]
es_ts = dtf.loc[zoom_idx:][["ts"]]
es_fc = dtf.loc[zoom_idx:][["forecast"]]
plt.plot(es_ts, color="black", label="Historical")
plt.plot(es_fc, color="red", label="Forecast")
plt.xlabel("Date")
plt.fill_between(x=dtf.loc[zoom_idx:].index, y1=dtf.loc[zoom_idx:]['lower'], y2=dtf.loc[zoom_idx:]['upper'], color='b', alpha=0.2)
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
plt.savefig(title+'_zoom.png', format='png', bbox_inches='tight')
plt.show()
return dtf[["ts", "model", "residuals", "lower", "forecast", "upper"]]
###############################################################################
# AUTOREGRESSIVE #
###############################################################################
'''
Tune Holt-Winters Exponential Smoothing
:parameter
:param ts_train: pandas timeseries
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param val_size: num - size of validation fold
:param scoring: function(y_true, y_pred)
:param top: num - plot top models only
:return
dtf with results
'''
def tune_expsmooth_model(ts_train, s=7, val_size=0.2, scoring=None, top=None, figsize=(15,5)):
## split
dtf_fit, dtf_val = model_selection.train_test_split(ts_train, test_size=val_size, shuffle=False)
dtf_fit, dtf_val = dtf_fit.to_frame(name="ts"), dtf_val.to_frame(name="ts")
## scoring
scoring = metrics.mean_absolute_error if scoring is None else scoring
## hyperparameter space
trend = ['add', 'mul', None]
damped = [True, False]
seasonal = ['add', 'mul', None]  # statsmodels expects 'mul', not 'mult'
## grid search
dtf_search = pd.DataFrame(columns=["combo","score","model"])
combinations = []
for t in trend:
for d in damped:
for ss in seasonal:
combo = "trend="+str(t)+", damped="+str(d)+", seas="+str(ss)
if combo not in combinations:
combinations.append(combo)
try:
### fit
model = smt.ExponentialSmoothing(dtf_fit, trend=t, damped=d, seasonal=ss, seasonal_periods=s).fit()
### predict
pred = model.forecast(len(dtf_val))
if pred.isna().sum() == 0:
dtf_val[combo] = pred.values
score = scoring(dtf_val["ts"].values, dtf_val[combo].values)
dtf_search = dtf_search.append(pd.DataFrame({"combo":[combo],"score":[score],"model":[model]}))
except:
continue
## find best
dtf_search = dtf_search.sort_values("score").reset_index(drop=True)
best = dtf_search["combo"].iloc[0]
dtf_val = dtf_val.rename(columns={best:best+" [BEST]"})
dtf_val = dtf_val[["ts",best+" [BEST]"] + list(dtf_search["combo"].unique())[1:]]
## plot
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.suptitle("Model Tuning", fontsize=15)
combos = dtf_val.drop("ts", axis=1).columns[:top]
if (len(combos) <= 7) or ((top is not None) and (top <= 7)):
colors = ["red","blue","green","violet","sienna","orange","yellow"]
else:
colors = [tuple(np.random.rand(3,)) for i in range(len(combos))]
### main
ts_train.plot(ax=ax[0], grid=True, color="black", legend=True, label="ts")
ax[0].fill_between(x=dtf_fit.index, y1=ts_train.max(), color='grey', alpha=0.2)
dtf_val[combos].plot(grid=True, ax=ax[0], color=colors, legend=True)
ax[0].legend(loc="upper left")
ax[0].set(xlabel=None)
### zoom
dtf_val["ts"].plot(grid=True, ax=ax[1], color="black", legend=False)
for i,col in enumerate(combos):
linewidth = 2 if col == best+" [BEST]" else 1
dtf_val[col].plot(grid=True, ax=ax[1], color=colors[i], legend=False, linewidth=linewidth)
ax[1].set(xlabel=None)
plt.show()
return dtf_search
'''
Fits Exponential Smoothing:
Simple (level) --> trend=None + seasonal=None
y[t+i] = α*y[t] + α(1-α)^1*y[t-1] + α(1-α)^2*y[t-2] + ... = (α)*y[t] + (1-α)*yhat[t]
Holt (level + trend) --> trend=["add","mul"] + seasonal=None
y[t+i] = level_f(α) + i*trend_f(β)
Holt-Winters (level + trend + seasonality) --> trend=["add","mul"] + seasonal=["add","mul"]
y[t+i] = level_f(α) + i*trend_f(β) + seasonality_f(γ)
:parameter
:param ts_train: pandas timeseries
:param ts_test: pandas timeseries
:param trend: str - "additive" (linear), "multiplicative" (non-linear)
:param damped: bool - damp trend
:param seasonal: str - "additive" (ex. +100 every 7 days), "multiplicative" (ex. x10 every 7 days)
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param factors: tuple - (α,β,γ) smoothing factor for the level (ex 0.94), trend, seasonal
:return
dtf with predictons and the model
'''
def fit_expsmooth(ts_train, ts_test, trend="additive", damped=False, seasonal="multiplicative", s=None, factors=(None,None,None), conf=0.95, figsize=(15,10)):
## checks
check_seasonality = "Seasonal parameters: No Seasonality" if (seasonal is None) & (s is None) else "Seasonal parameters: "+str(seasonal)+" Seasonality every "+str(s)+" observations"
print(check_seasonality)
## train
model = smt.ExponentialSmoothing(ts_train, trend=trend, damped=damped, seasonal=seasonal, seasonal_periods=s).fit(factors[0], factors[1], factors[2])
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = model.fittedvalues
## test
dtf_test = ts_test.to_frame(name="ts")
dtf_test["forecast"] = model.predict(start=len(ts_train), end=len(ts_train)+len(ts_test)-1)
## evaluate
dtf = dtf_train.append(dtf_test)
alpha, beta, gamma = round(model.params["smoothing_level"],2), round(model.params["smoothing_slope"],2), round(model.params["smoothing_seasonal"],2)
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title="Holt-Winters "+str((alpha, beta, gamma)))
return dtf, model
'''
Tune ARIMA
:parameter
:param ts_train: pandas timeseries
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param val_size: num - size of validation fold
:param max_order: tuple - max (p,d,q) values
:param seasonal_order: tuple - max (P,D,Q) values
:param scoring: function(y_true, y_pred)
:param top: num - plot top models only
:return
dtf with results
'''
def tune_arima_model(ts_train, s=7, val_size=0.2, max_order=(3,1,3), seasonal_order=(1,1,1), scoring=None, top=None, figsize=(15,5)):
## split
dtf_fit, dtf_val = model_selection.train_test_split(ts_train, test_size=val_size, shuffle=False)
dtf_fit, dtf_val = dtf_fit.to_frame(name="ts"), dtf_val.to_frame(name="ts")
## scoring
scoring = metrics.mean_absolute_error if scoring is None else scoring
## hyperparameter space
ps = range(0,max_order[0]+1)
ds = range(0,max_order[1]+1)
qs = range(0,max_order[2]+1)
Ps = range(0,seasonal_order[0]+1)
Ds = range(0,seasonal_order[1]+1)
Qs = range(0,seasonal_order[2]+1)
## grid search
dtf_search = pd.DataFrame(columns=["combo","score","model"])
combinations = []
for p in ps:
for d in ds:
for q in qs:
for P in Ps:
for D in Ds:
for Q in Qs:
combo = "("+str(p)+","+str(d)+","+str(q)+")x("+str(P)+","+str(D)+","+str(Q)+")"
if combo not in combinations:
combinations.append(combo)
try:
### fit
model = smt.SARIMAX(ts_train, order=(p,d,q), seasonal_order=(P,D,Q,s)).fit()
### predict
pred = model.forecast(len(dtf_val))
if pred.isna().sum() == 0:
dtf_val[combo] = pred.values
score = scoring(dtf_val["ts"].values, dtf_val[combo].values)
dtf_search = dtf_search.append(pd.DataFrame({"combo":[combo],"score":[score],"model":[model]}))
except:
continue
## find best
dtf_search = dtf_search.sort_values("score").reset_index(drop=True)
best = dtf_search["combo"].iloc[0]
dtf_val = dtf_val.rename(columns={best:best+" [BEST]"})
dtf_val = dtf_val[["ts",best+" [BEST]"] + list(dtf_search["combo"].unique())[1:]]
## plot
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.suptitle("Model Tuning", fontsize=15)
combos = dtf_val.drop("ts", axis=1).columns[:top]
if (len(combos) <= 7) or ((top is not None) and (top <= 7)):
colors = ["red","blue","green","violet","sienna","orange","yellow"]
else:
colors = [tuple(np.random.rand(3,)) for i in range(len(combos))]
### main
ts_train.plot(ax=ax[0], grid=True, color="black", legend=True, label="ts")
ax[0].fill_between(x=dtf_fit.index, y1=ts_train.max(), color='grey', alpha=0.2)
dtf_val[combos].plot(grid=True, ax=ax[0], color=colors, legend=True)
ax[0].legend(loc="upper left")
ax[0].set(xlabel=None)
### zoom
dtf_val["ts"].plot(grid=True, ax=ax[1], color="black", legend=False)
for i,col in enumerate(combos):
linewidth = 2 if col == best+" [BEST]" else 1
dtf_val[col].plot(grid=True, ax=ax[1], color=colors[i], legend=False, linewidth=linewidth)
ax[1].set(xlabel=None)
plt.show()
return dtf_search
'''
Find best Seasonal-ARIMAX parameters.
:parameter
:param ts: pandas timeseries
:param exog: pandas dataframe or numpy array
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:return
best model
'''
def find_best_sarimax(ts, seasonal=True, stationary=False, s=1, exog=None,
max_p=10, max_d=3, max_q=10,
max_P=10, max_D=3, max_Q=10):
best_model = pmdarima.auto_arima(ts, exogenous=exog,
seasonal=seasonal, stationary=stationary, m=s,
information_criterion='aic', max_order=20,
max_p=max_p, max_d=max_d, max_q=max_q,
max_P=max_P, max_D=max_D, max_Q=max_Q,
error_action='ignore')
print("best model --> (p, d, q):", best_model.order, " and (P, D, Q, s):", best_model.seasonal_order)
return best_model.summary()
'''
Fits SARIMAX (Seasonal ARIMA with External Regressors) (p,d,q)x(P,D,Q,s):
y[t+1] = (c + a0*y[t] + a1*y[t-1] +...+ ap*y[t-p]) + (e[t] + b1*e[t-1] + b2*e[t-2] +...+ bq*e[t-q]) + (B*X[t])
:parameter
:param ts_train: pandas timeseries
:param ts_test: pandas timeseries
:param order: tuple - (p,d,q) --> p: lag order (AR), d: degree of differencing (to remove trend), q: order of moving average (MA)
:param seasonal_order: tuple - (P,D,Q) --> seasonal lag orders (ex. lag from the last 2 seasons)
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param exog_train: pandas dataframe or numpy array
:param exog_test: pandas dataframe or numpy array
:return
dtf with predictons and the model
'''
def fit_sarimax(ts_train, ts_test, order=(1,0,1), seasonal_order=(1,0,1), s=7, exog_train=None, exog_test=None, conf=0.95, figsize=(15,10)):
## checks
check_trend = "Trend parameters: No differencing" if order[1] == 0 else "Trend parameters: d="+str(order[1])
print(check_trend)
check_seasonality = "Seasonal parameters: No Seasonality" if (s == 0) & (np.sum(seasonal_order[0:2]) == 0) else "Seasonal parameters: Seasonality every "+str(s)+" observations"
print(check_seasonality)
check_exog = "Exog parameters: Not given" if (exog_train is None) & (exog_test is None) else "Exog parameters: number of regressors="+str(exog_train.shape[1])
print(check_exog)
## train
model = smt.SARIMAX(ts_train, order=order, seasonal_order=seasonal_order+(s,), exog=exog_train, enforce_stationarity=False, enforce_invertibility=False).fit()
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = model.fittedvalues
## test
dtf_test = ts_test.to_frame(name="ts")
dtf_test["forecast"] = model.predict(start=len(ts_train), end=len(ts_train)+len(ts_test)-1, exog=exog_test)
## add conf_int
ci = model.get_forecast(len(ts_test)).conf_int(1-conf).values
dtf_test["lower"], dtf_test["upper"] = ci[:,0], ci[:,1]
## evaluate
dtf = dtf_train.append(dtf_test)
title = "ARIMA "+str(order) if exog_train is None else "ARIMAX "+str(order)
title = "S"+title+" x "+str(seasonal_order) if np.sum(seasonal_order) > 0 else title
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title=title)
return dtf, model
'''
Forecast unknown future with sarimax or expsmooth.
:parameter
:param ts: pandas series
:param model: model object
:param pred_ahead: number of observations to forecast (ex. pred_ahead=30)
:param end: string - date to forecast (ex. end="2016-12-31")
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
:param zoom: for plotting
'''
def forecast_autoregressive(ts, model=None, pred_ahead=None, end=None, freq="D", conf=0.95, zoom=30, figsize=(6,6)):
## model
model = smt.SARIMAX(ts, order=(1,1,1), seasonal_order=(0,0,0,0)).fit() if model is None else model
## fit
dtf = ts.to_frame(name="ts")
dtf["model"] = model.fittedvalues
dtf["residuals"] = dtf["ts"] - dtf["model"]
## index
index = utils_generate_indexdate(start=ts.index[-1], end=end, n=pred_ahead, freq=freq)
## forecast
if "holtwinters" in str(model):
preds = model.forecast(len(index))
dtf_preds = preds.to_frame(name="forecast")
else:
preds = model.get_forecast(len(index))
dtf_preds = preds.predicted_mean.to_frame(name="forecast")
ci = preds.conf_int(1-conf).values
dtf_preds["lower"], dtf_preds["upper"] = ci[:,0], ci[:,1]
#dtf_preds.index, dtf_preds.index.freq = index, 'D'
#print(dtf_preds)
## add intervals and plot
dtf = dtf.append(dtf_preds)
dtf = utils_add_forecast_int(dtf, conf=conf, zoom=zoom, title="SARIMAX", figsize=figsize)
return dtf
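# Example (illustrative sketch): forecast 30 days ahead; with model=None a default
# SARIMAX(1,1,1) is fitted on the fly before projecting the unknown future.
# >>> dtf_future = forecast_autoregressive(ts, model=None, pred_ahead=30, freq="D", zoom=30)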
###############################################################################
# RNN #
###############################################################################
'''
Plot loss and metrics of keras training.
'''
def utils_plot_keras_training(training):
metrics = [k for k in training.history.keys() if ("loss" not in k) and ("val" not in k)]
fig, ax = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(15,3))
## training
ax[0].set(title="Training")
ax11 = ax[0].twinx()
ax[0].plot(training.history['loss'], color='black')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('Loss', color='black')
for metric in metrics:
ax11.plot(training.history[metric], label=metric)
ax11.set_ylabel("Score", color='steelblue')
ax11.legend()
## validation
ax[1].set(title="Validation")
ax22 = ax[1].twinx()
ax[1].plot(training.history['val_loss'], color='black')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Loss', color='black')
for metric in metrics:
ax22.plot(training.history['val_'+metric], label=metric)
ax22.set_ylabel("Score", color="steelblue")
plt.show()
'''
Preprocess a ts for LSTM partitioning into X and y.
:parameter
:param ts: pandas timeseries
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param scaler: sklearn scaler object - if None is fitted
:param exog: pandas dataframe or numpy array
:return
X with shape: (len(ts)-s, s, features)
y with shape: (len(ts)-s,)
the fitted scaler
'''
def utils_preprocess_lstm(ts, s, scaler=None, exog=None):
## scale
if scaler is None:
scaler = preprocessing.MinMaxScaler(feature_range=(0,1))
ts_preprocessed = scaler.fit_transform(ts.values.reshape(-1,1)).reshape(-1)
## create X (N,s,x) and y (N,)
ts_preprocessed = kprocessing.sequence.TimeseriesGenerator(data=ts_preprocessed,
targets=ts_preprocessed,
length=s, batch_size=1)
lst_X, lst_y = [], []
for i in range(len(ts_preprocessed)):
xi, yi = ts_preprocessed[i]
lst_X.append(xi[0])
lst_y.append(yi[0])
X = np.expand_dims(np.array(lst_X), axis=2)
y = np.array(lst_y)
return X, y, scaler
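# Example (illustrative sketch): a training series of length N with s=20 yields N-20
# sliding windows, each of shape (20, 1), plus the fitted MinMax scaler.
# >>> X_train, y_train, scaler = utils_preprocess_lstm(ts_train, s=20)
# >>> X_train.shape   # (len(ts_train) - 20, 20, 1)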
'''
Get fitted values from LSTM.
'''
def utils_fitted_lstm(ts, model, scaler, exog=None):
## scale
s = model.input_shape[1]
ts_preprocessed = scaler.transform(ts.values.reshape(-1,1)).reshape(-1)
## create Xy, predict = fitted
lst_fitted = [np.nan]*s
for i in range(len(ts_preprocessed)):
end_ix = i + s
if end_ix > len(ts_preprocessed)-1:
break
X = ts_preprocessed[i:end_ix]
X = np.array(X)
X = np.reshape(X, (1,s,1))
fit = model.predict(X)
fit = scaler.inverse_transform(fit)[0][0]
lst_fitted.append(fit)
return np.array(lst_fitted)
'''
Predict ts with LSTM using previous predictions.
'''
def utils_predict_lstm(last_s_obs, model, scaler, pred_ahead, exog=None):
## scale
s = model.input_shape[1]
ts_preprocessed = list(scaler.transform(last_s_obs.values.reshape(-1,1)))
## predict, append, re-predict
lst_preds = []
for i in range(pred_ahead):
X = np.array(ts_preprocessed[len(ts_preprocessed)-s:])
X = np.reshape(X, (1,s,1))
pred = model.predict(X)
ts_preprocessed.append(pred[0])
pred = scaler.inverse_transform(pred)[0][0]
lst_preds.append(pred)
return np.array(lst_preds)
'''
Fit Long Short-Term Memory neural network.
:parameter
:param ts: pandas timeseries
:param exog: pandas dataframe or numpy array
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:return
dtf with predictons and the model
'''
def fit_lstm(ts_train, ts_test, model, exog=None, s=20, epochs=100, conf=0.95, figsize=(15,5)):
## check
print("Seasonality: using the last", s, "observations to predict the next 1")
## preprocess train
X_train, y_train, scaler = utils_preprocess_lstm(ts_train, scaler=None, exog=exog, s=s)
print("--- X:", X_train.shape, "| y:", y_train.shape, "---")
## lstm
if model is None:
model = models.Sequential()
model.add( layers.LSTM(input_shape=X_train.shape[1:], units=50, activation='relu', return_sequences=False) )
model.add( layers.Dense(1) )
model.compile(optimizer='adam', loss='mean_absolute_error')
print(model.summary())
## train
verbose = 0 if epochs > 1 else 1
training = model.fit(x=X_train, y=y_train, batch_size=1, epochs=epochs, shuffle=True, verbose=verbose, validation_split=0.3)
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = utils_fitted_lstm(ts_train, training.model, scaler, exog)
dtf_train["model"] = dtf_train["model"].fillna(method='bfill')
## test
last_s_obs = ts_train[-s:]
preds = utils_predict_lstm(last_s_obs, training.model, scaler, pred_ahead=len(ts_test), exog=None)
dtf_test = ts_test.to_frame(name="ts").merge(pd.DataFrame(data=preds, index=ts_test.index, columns=["forecast"])
"""
This module contains methods for generating H2H data for games
"""
import pandas as pd
from scrapenhl2.manipulate import manipulate as manip, add_onice_players as onice
from scrapenhl2.scrape import general_helpers as helpers, parse_toi, parse_pbp, team_info, teams
def get_game_combo_toi(season, game, player_n=2, *hrcodes):
"""
This method gets H2H TOI at 5v5 for the given game.
:param season: int, the season
:param game: int, the game
:param player_n: int. E.g. 1 gives you a list of players and TOI, 2 gives you h2h, 3 gives you groups of 3, etc.
:param hrcodes: to limit exploding joins, specify strings containing 'H' and 'R' and 'A', each of length player_n
For example, if player_n=3, specify 'HHH' to only get home team player combos.
If this is left unspecified, will do all combos, which can be problematic when player_n > 3.
'R' for road, 'H' for home, 'A' for all (both)
:return: a df with [P1, P1Team, P2, P2Team, TOI, etc]. Entries will be duplicated.
"""
if len(hrcodes) == 0:
hrcodes = ['A'*player_n]
for hrcode in hrcodes:
assert len(hrcode) == player_n
home, road = parse_toi.get_melted_home_road_5v5_toi(season, game)
return _combo_secs_from_hrcodes(home, road, *hrcodes)
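# Example (illustrative sketch; season/game ids are placeholders): 5v5 head-to-head TOI
# for home-vs-road skater pairs in a single game.
# >>> h2h = get_game_combo_toi(2017, 20001, 2, 'HR')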
def get_game_combo_corsi(season, game, player_n=2, cfca=None, *hrcodes):
"""
This method gets H2H Corsi at 5v5 for the given game.
:param season: int, the season
:param game: int, the game
:param player_n: int. E.g. 1 gives you a list of players and TOI, 2 gives you h2h, 3 gives you groups of 3, etc.
:param cfca: str, or None. If you specify 'cf', returns CF only. For CA, use 'ca'. None returns CF - CA.
:param hrcodes: to limit exploding joins, specify strings containing 'H' and 'R' and 'A', each of length player_n
For example, if player_n=3, specify 'HHH' to only get home team player combos.
If this is left unspecified, will do all combos, which can be problematic when player_n > 3.
'R' for road, 'H' for home, 'A' for all (both)
:return: a df with [P1, P1Team, P2, P2Team, TOI, etc]. Entries will be duplicated.
"""
if len(hrcodes) == 0:
hrcodes = ['A'*player_n]
for hrcode in hrcodes:
assert len(hrcode) == player_n
corsipm = parse_pbp.get_5v5_corsi_pm(season, game)
home, road = parse_toi.get_melted_home_road_5v5_toi(season, game)
return _combo_corsi_from_hrcodes(home, road, corsipm, cfca, *hrcodes)
def _combo_corsi_from_hrcodes(homedf=None, roaddf=None, corsidf=None, cfca=None, *hrcodes):
"""
Joins the homedf and roaddf as specified by hrcodes.
:param homedf: home team df (e.g. for TOI)
:param roaddf: road team df (e.g. for TOI)
:param corsidf: a dataframe with Time and HomeCorsi (1 or -1), one row per event
:param hrcodes: to limit exploding joins, specify strings containing 'H' and 'R' and 'A', each of length player_n
For example, if player_n=3, specify 'HHH' to only get home team player combos.
If this is left unspecified, will do all combos, which can be problematic when player_n > 3.
'R' for road, 'H' for home, 'A' for all (both)
:return: joined df, grouped and summed by player combos
"""
alldf = pd.concat([homedf, roaddf])
"""Contains functions for preprocessing data
Classes
-------
Person
Functions
----------
recurcive_append
create_pedigree
add_control
preprocess_king
prepare_data
prepare_gts
"""
import logging
import pandas as pd
import numpy as np
from pysnptools.snpreader import Bed
from bgen_reader import open_bgen, read_bgen
from config import nan_integer
class Person:
"""Just a simple data structure representing individuals
Args:
id : str
IID of the individual.
fid : str
FID of the individual.
pid : str
IID of the father of that individual.
mid : str
IID of the mother of that individual.
"""
def __init__(self, id, fid=None, pid=None, mid=None):
self.id = id
self.fid = fid
self.pid = pid
self.mid = mid
def recurcive_append(dictionary, index, element):
"""Adds an element to value of all the keys that can be reached from index with using get recursively.
Args:
dictionary : dict
A dictionary of objects to list
index
The start point
element
What should be added to values
"""
queue = {index}
seen_so_far = set()
while queue:
current_index = queue.pop()
seen_so_far.add(current_index)
dictionary[current_index].add(element)
queue = queue.union(dictionary[current_index])
queue = queue.difference(seen_so_far)
def create_pedigree(king_address, agesex_address):
"""Creates pedigree table from agesex file and kinship file in KING format.
Args:
king_address : str
Address of a kinship file in KING format. The kinship file is a '\t' separated csv with columns "FID1", "ID1", "FID2", "ID2", "InfType".
Each row represents a relationship between two individuals. InfType column states the relationship between two individuals.
The only relationships that matter for this script are full sibling and parent-offspring which are shown by 'FS' and 'PO' respectively.
This file is used in creating a pedigree file and can be generated using KING.
As fids starting with '_' are reserved for control there should be no fids starting with '_'.
agesex_address : str
Address of the agesex file. This is a " " separated CSV with columns "FID", "IID", "FATHER_ID", "MOTHER_ID", "sex", "age".
Each row contains the age and sex of one individual. Male and Female sex should be represented with 'M' and 'F'.
Age column is used for distinguishing between parent and child in a parent-offspring relationship inferred from the kinship file.
ID1 is a parent of ID2 if there is a 'PO' relationship between them and 'ID1' is at least 12 years older than ID2.
Returns:
pd.DataFrame:
A pedigree table with 'FID', 'IID', 'FATHER_ID', 'MOTHER_ID'. Each row represents an individual.
"""
kinship = pd.read_csv(king_address, delimiter="\t").astype(str)
logging.info("loaded kinship file")
agesex = pd.read_csv(agesex_address, delim_whitespace=True)
agesex["IID"] = agesex["IID"].astype(str)
agesex["FID"] = agesex["FID"].astype(str)
logging.info("loaded agesex file")
agesex = agesex.set_index("IID")
logging.info("creating age and sex dictionaries")
kinship = pd.merge(kinship, agesex.rename(columns={"sex":"sex1", "age":"age1"}), left_on="ID1", right_index=True)
kinship = pd.merge(kinship, agesex.rename(columns={"sex":"sex2", "age":"age2"}), left_on="ID2", right_index=True)
logging.info("dictionaries created")
people = {}
fid_counter = 0
dropouts = []
kinship_cols = kinship.columns.tolist()
index_id1 = kinship_cols.index("ID1")
index_id2 = kinship_cols.index("ID2")
index_sex1 = kinship_cols.index("sex1")
index_sex2 = kinship_cols.index("sex2")
index_age1 = kinship_cols.index("age1")
index_age2 = kinship_cols.index("age2")
index_inftype = kinship_cols.index("InfType")
logging.info("creating pedigree objects")
pop_size = kinship.values.shape[0]
t = kinship.values.tolist()
for row in range(pop_size):
relation = t[row][index_inftype]
id1 = t[row][index_id1]
id2 = t[row][index_id2]
age1 = t[row][index_age1]
age2 = t[row][index_age2]
sex1 = t[row][index_sex1]
sex2 = t[row][index_sex2]
p1 = people.get(id1)
if p1 is None:
p1 = Person(id1)
people[id1] = p1
p2 = people.get(id2)
if p2 is None:
p2 = Person(id2)
people[id2] = p2
if relation == "PO":
if age1 > age2+12:
if sex1 == "F":
p2.mid = p1.id
if sex1 == "M":
p2.pid = p1.id
if age2 > age1+12:
if sex2 == "F":
p1.mid = p2.id
if sex2 == "M":
p1.pid = p2.id
if relation == "FS":
if p1.fid is None and p2.fid is None:
p1.fid = str(fid_counter)
p2.fid = str(fid_counter)
fid_counter += 1
if p1.fid is None and p2.fid is not None:
p1.fid = p2.fid
if p1.fid is not None and p2.fid is None:
p2.fid = p1.fid
for excess in dropouts:
people.pop(excess)
data = []
for p in people.values():
if p.fid is None:
p.fid = str(fid_counter)
fid_counter += 1
if p.mid is None:
#default mother id
p.mid = p.fid + "___M"
if p.pid is None:
#default father id
p.pid = p.fid + "___P"
data.append((p.fid, p.id, p.pid, p.mid))
data = pd.DataFrame(data, columns = ['FID' , 'IID', 'FATHER_ID' , 'MOTHER_ID']).astype(str)
return data
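# Example usage (hypothetical file names; both files must follow the formats described in the docstring):
# >>> ped = create_pedigree("king.kin0", "agesex.txt")
# >>> list(ped.columns)
# ['FID', 'IID', 'FATHER_ID', 'MOTHER_ID']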
def add_control(pedigree):
"""Adds control families to the pedigree table for testing.
For each family that has two or more siblings and both parents, creates 3 new families: one with no parents, one with no mother and one with no father.
FIDs of these families are x+original_fid where x is "_o_", "_p_", "_m_" for these cases: no parents, only has father, only has mother. IIDs are the same as in the original family.
Args:
pedigree : pd.DataFrame
A pedigree table with 'FID', 'IID', 'FATHER_ID', 'MOTHER_ID'. Each row represents an individual.
fids starting with "_" are reserved for control.
Returns:
pd.DataFrame
A pedigree table with 'FID', 'IID', 'FATHER_ID', 'MOTHER_ID'. Each row represents an individual.
For each family with both parents and more than one offspring, it has a control family(fids for control families start with '_')
"""
pedigree["has_mother"] = pedigree["MOTHER_ID"].isin(pedigree["IID"])
pedigree["has_father"] = pedigree["FATHER_ID"].isin(pedigree["IID"])
families_with_both_parents = pedigree[pedigree["has_father"] & pedigree["has_mother"]]
count_of_sibs_in_fam = families_with_both_parents.groupby(["FID", "FATHER_ID", "MOTHER_ID"]).count().reset_index()
FIDs_with_multiple_sibs = count_of_sibs_in_fam[count_of_sibs_in_fam["IID"] > 1][["FID"]]
families_with_multiple_sibs = families_with_both_parents.merge(FIDs_with_multiple_sibs, on = "FID")
families_with_multiple_sibs["FID"] = "_o_" + families_with_multiple_sibs["FID"].astype(str)
families_with_multiple_sibs["MOTHER_ID"] = families_with_multiple_sibs["FID"].astype(str) + "_M"
families_with_multiple_sibs["FATHER_ID"] = families_with_multiple_sibs["FID"].astype(str) + "_P"
keep_mother = families_with_both_parents.copy()
keep_mother["FID"] = "_m_" + keep_mother["FID"].astype(str)
keep_mother["FATHER_ID"] = keep_mother["FID"].astype(str) + "_P"
keep_father = families_with_both_parents.copy()
keep_father["FID"] = "_p_" + keep_father["FID"].astype(str)
keep_father["MOTHER_ID"] = keep_father["FID"].astype(str) + "_M"
pedigree = pedigree.append(families_with_multiple_sibs).append(keep_father).append(keep_mother)
pedigree = pedigree[['FID' , 'IID', 'FATHER_ID' , 'MOTHER_ID']]
return pedigree
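# Illustration (derived from the code above): a family with FID "12", both parents genotyped and
# two or more siblings gains three control copies: "_o_12" (both parents replaced by the
# placeholders "_o_12_P"/"_o_12_M"), "_p_12" (mother replaced by "_p_12_M") and
# "_m_12" (father replaced by "_m_12_P").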
#TODO raise error if file is multi chrom
def preprocess_king(ibd, segs, bim, chromosomes, sibships):
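    # Converts KING IBD segments plus the KING allsegs table into the per-sibling-pair dictionary
    # of flattened IBD segments used by the imputation (see the note after this function).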
ibd["Chr"] = ibd["Chr"].astype(int)
segs["Chr"] = segs["Chr"].astype(int)
chromosomes = [int(x) for x in chromosomes]
if len(chromosomes)>1:
ibd = ibd[ibd["Chr"].isin(chromosomes)][["ID1", "ID2", "IBDType", "StartSNP", "StopSNP"]]
else:
ibd = ibd[ibd["Chr"]==chromosomes[0]][["ID1", "ID2", "IBDType", "StartSNP", "StopSNP"]]
#TODO cancel or generalize this
if set(ibd["IBDType"].unique().tolist()) == {"IBD1", "IBD2"}:
ibd["IBDType"] = ibd["IBDType"].apply(lambda x: 2 if x=="IBD2" else 1)
ibd["IBDType"] = ibd["IBDType"].astype(int)
temp = bim[["id", "coordinate"]].rename(columns = {"id":"StartSNP","coordinate":"StartSNPLoc"})
ibd= ibd.merge(temp, on="StartSNP")
temp = bim[["id", "coordinate"]].rename(columns = {"id":"StopSNP","coordinate":"StopSNPLoc"})
ibd = ibd.merge(temp, on="StopSNP")
ibd['segment'] = ibd[['StartSNPLoc', 'StopSNPLoc', "IBDType"]].values.tolist()
ibd = ibd.groupby(["ID1", "ID2"]).agg({'segment':sorted}).reset_index()
if len(chromosomes)>1:
segs = segs[segs["Chr"].isin(chromosomes)][["StartSNP", "StopSNP"]]
else:
segs = segs[segs["Chr"]==chromosomes[0]][["StartSNP", "StopSNP"]]
temp = bim[["id", "coordinate"]].rename(columns = {"id":"StartSNP","coordinate":"StartSNPLoc"})
segs= segs.merge(temp, on="StartSNP")
temp = bim[["id", "coordinate"]].rename(columns = {"id":"StopSNP","coordinate":"StopSNPLoc"})
segs = segs.merge(temp, on="StopSNP")
segs = segs[['StartSNPLoc', 'StopSNPLoc']].sort_values('StartSNPLoc').values.tolist()
flatten_seg_as_ibd0 = []
for l in segs:
flatten_seg_as_ibd0 = flatten_seg_as_ibd0 + l + [0]
#TODO does this work with multichromosome in the ibd file? it won't work if snplocs are indexed from zero in each snp
all_ibd_segs = []
for index, row in ibd.iterrows():
id1, id2, segments = row["ID1"], row["ID2"], row["segment"]
seg_counter = 0
row_ibd0 = []
start, end = segs[seg_counter]
prev_end = start
for seg_start, seg_end, ibd_type in segments:
while seg_start>end:
if prev_end<end:
row_ibd0.append([prev_end, end, 0])
if (seg_counter+1) < len(segs):
seg_counter+=1
start, end = segs[seg_counter]
prev_end = start
else:
raise Exception("this segments starts after all meaningfull segments")
if seg_start<start:
raise Exception("segment starts sooner than it should")
if seg_start>prev_end:
row_ibd0.append([prev_end, seg_start, 0])
if seg_end>end:
raise Exception("segment ends outside where it should have")
prev_end=seg_end
if prev_end<end:
row_ibd0.append([prev_end, end, 0])
row_ibd0 = row_ibd0 + [[start, end, 0] for start, end in segs[seg_counter+1:]]
row_ibd = segments+row_ibd0
row_ibd = sorted(row_ibd, key=lambda x:x[0])
flatten_row_ibd = []
for l in row_ibd:
for el in l:
flatten_row_ibd.append(el)
all_ibd_segs.append(flatten_row_ibd)
ibd["segment"] = pd.Series(all_ibd_segs)
ibd_dict = ibd.set_index(["ID1", "ID2"]).to_dict()["segment"]
for index, row in sibships.iterrows():
sibs = row["IID"]
nsibs = len(sibs)
if nsibs > 1:
for i in range(1, nsibs):
for j in range(0, i):
sib1 = sibs[i].decode()
sib2 = sibs[j].decode()
if not((sib1, sib2) in ibd_dict or (sib2, sib1) in ibd_dict):
ibd_dict[(sib1, sib2)] = flatten_seg_as_ibd0
return ibd_dict
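# Note on the returned structure (derived from the code above): ibd_dict maps a pair of IIDs to a
# flattened list of segments, [start0, end0, ibd_type0, start1, end1, ibd_type1, ...], where gaps
# between the reported IBD1/IBD2 segments are filled in as IBD0, and sibling pairs with no reported
# segments get the whole genotyped region as IBD0.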
def prepare_data(pedigree, phased_address, unphased_address, king_ibd = None, king_segs = None, snipar_ibd = None, bim_address = None, fam_address = None, chromosome = None, pedigree_nan = '0'):
"""Processes the non_gts required data for the imputation and returns it.
Outputs used for the imputation have ascii bytes instead of strings.
Args:
pedigree : pd.DataFrame
The pedigree table. It contains 'FID', 'IID', 'FATHER_ID' and, 'MOTHER_ID' columns.
phased_address : str
Address of the phased bgen file (does not include '.bgen'). Only one of unphased_address and phased_address is necessary.
unphased_address : str
Address of the bed file (does not include '.bed'). Only one of unphased_address and phased_address is necessary.
king_ibd : pd.DataFrame, optional
A pandas dataframe containing IBD statuses for all SNPs.
It has these columns: "chr", "ID1", "ID2", "IBDType", "StartSNP", "StopSNP".
Each line states an IBD segment between a pair of individuals. This can be generated using King software.
Either king inputs or snipar should be provided.
king_segs : pd.DataFrame, optional
A pandas dataframe containing IBD segments that have been processed.
It has these columns: Segment, Chr, StartSNP, StopSNP
Each line states a segment that's been processed. This can be generated using King software.
Either king inputs or snipar should be provided.
snipar_ibd : pd.DataFrame, optional
A pandas dataframe containing IBD statuses for all SNPs.
It has these columns: ID1, ID2, IBDType, Chr, start_coordinate, stop_coordinate
Each line states an IBD segment between a pair of individuals. This can be generated using snipar.
Either king inputs or snipar should be provided.
bim_address : str, optional
Address of the bim file if it's different from the address of the bed file. Does not include '.bim'.
chromosome: str, optional
Number of the chromosome that's going to be loaded.
pedigree_nan: str, optional
Value that's considered nan in the pedigree
Returns:
tuple(pandas.Dataframe, dict, numpy.ndarray, pandas.Dataframe, numpy.ndarray, numpy.ndarray)
Returns the data required for the imputation. This data is a tuple of multiple objects.
sibships: pandas.DataFrame
A pandas DataFrame with columns ['FID', 'FATHER_ID', 'MOTHER_ID', 'IID', 'has_father', 'has_mother', 'single_parent'] where IID columns is a list of the IIDs of individuals in that family.
It only contains families that have more than one child or only one parent.
ibd: pandas.DataFrame
A pandas DataFrame with columns "ID1", "ID2", 'segment'. The segments column is a list of IBD segments between ID1 and ID2.
Each segment consists of a start, an end, and an IBD status. The segment list is flattened meaning it's like [start0, end0, ibd_status0, start1, end1, ibd_status1, ...]
bim: pandas.DataFrame
A dataframe with these columns(dtype str): Chr id morgans coordinate allele1 allele2
chromosomes: str
A string containing all the chromosomes present in the data.
ped_ids: set
Set of ids of individuals with missing parents.
pedigree_output: np.array
Pedigree with added parental status.
"""
logging.info("For file "+str(phased_address)+";"+str(unphased_address)+": Finding which chromosomes")
if unphased_address:
if bim_address is None:
bim_address = unphased_address+'.bim'
bim = pd.read_csv(bim_address, delim_whitespace=True, header=None, names=["Chr", "id", "morgans", "coordinate", "allele1", "allele2"])
if fam_address is None:
fam_address = unphased_address+'.fam'
fam = pd.read_csv(fam_address, delim_whitespace=True, header=None, names=["FID", "IID", "PID", "MID", "SEX", "Phen"])
else:
if bim_address is None:
bim_address = phased_address+'.bgen'
if fam_address is None:
fam_address = phased_address+'.bgen'
bgen = read_bgen(bim_address, verbose=False)
bim = bgen["variants"].compute().rename(columns={"chrom":"Chr", "pos":"coordinate"})
#TODO this line should be replaced
bim["id"]=bim["rsid"]
if chromosome is None:
raise Exception("chromosome should be specified when using phased data")
bim["Chr"] = chromosome
fam = pd.DataFrame({"IID":read_bgen(fam_address)["samples"]})
chromosomes = bim["Chr"].unique().astype(int)
logging.info(f"with chromosomes {chromosomes} initializing non_gts data")
logging.info(f"with chromosomes {chromosomes} loading and filtering pedigree file ...")
#keeping individuals with no parents
pedigree["has_father"] = pedigree["FATHER_ID"].isin(pedigree["IID"]) & pedigree["FATHER_ID"].isin(fam["IID"])
pedigree["has_mother"] = pedigree["MOTHER_ID"].isin(pedigree["IID"]) & pedigree["MOTHER_ID"].isin(fam["IID"])
no_parent_pedigree = pedigree[~(pedigree["has_mother"] & pedigree["has_father"])]
#removing individuals whose parents are nan
no_parent_pedigree = no_parent_pedigree[(no_parent_pedigree["MOTHER_ID"] != pedigree_nan) & (no_parent_pedigree["FATHER_ID"] != pedigree_nan)]
no_parent_pedigree[["FID", "IID", "FATHER_ID", "MOTHER_ID"]] = no_parent_pedigree[["FID", "IID", "FATHER_ID", "MOTHER_ID"]].astype("S")
ped_ids = set(no_parent_pedigree["IID"].tolist())
#finding siblings in each family
sibships = no_parent_pedigree.groupby(["FID", "FATHER_ID", "MOTHER_ID", "has_father", "has_mother"]).agg({'IID':lambda x: list(x)}).reset_index()
sibships["sib_count"] = sibships["IID"].apply(len)
sibships["single_parent"] = sibships["has_father"] ^ sibships["has_mother"]
sibships = sibships[(sibships["sib_count"]>1) | sibships["single_parent"]]
fids = set([i for i in sibships["FID"].values.tolist() if i.startswith(b"_")])
logging.info(f"with chromosomes {chromosomes} loading bim file ...")
logging.info(f"with chromosomes {chromosomes} loading and transforming ibd file ...")
if snipar_ibd is None:
ibd = preprocess_king(king_ibd, king_segs, bim, chromosomes, sibships)
else:
ibd = snipar_ibd.astype(str)
ibd[["IBDType", "start_coordinate", "stop_coordinate"]] = ibd[["IBDType", "start_coordinate", "stop_coordinate"]].astype(int)
#Adding location of start and end of each segment
chromosomes = chromosomes.astype(str)
if len(chromosomes)>1:
ibd = ibd[ibd["Chr"].isin(chromosomes)]
else:
ibd = ibd[ibd["Chr"]==chromosomes[0]]
#TODO cancel or generalize this
ibd['segment'] = ibd[['start_coordinate', 'stop_coordinate', "IBDType"]].values.tolist()
def create_seg_list(x):
elements = list(x)
result = []
for el in elements:
result = result+el
return result
ibd = ibd.groupby(["ID1", "ID2"]).agg({'segment':lambda x:create_seg_list(x)}).to_dict()["segment"]
logging.info(f"with chromosomes {chromosomes} loading genotype file ...")
logging.info(f"with chromosomes {chromosomes} initializing non_gts data done ...")
pedigree[["FID", "IID", "FATHER_ID", "MOTHER_ID"]] = pedigree[["FID", "IID", "FATHER_ID", "MOTHER_ID"]].astype(str)
pedigree_output = np.concatenate(([pedigree.columns.values.tolist()], pedigree.values))
return sibships, ibd, bim, chromosomes, ped_ids, pedigree_output
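# Example usage (hypothetical paths and inputs; exactly one of the genotype addresses is needed):
# >>> sibships, ibd, bim, chromosomes, ped_ids, ped_out = prepare_data(
# ...     pedigree, None, "genotypes/chr22", king_ibd=king_ibd_df, king_segs=king_segs_df)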
def prepare_gts(phased_address, unphased_address, bim, pedigree_output, ped_ids, chromosomes, start=None, end=None):
""" Processes the gts required data for the imputation and returns it.
Outputs used for the imputation have ascii bytes instead of strings.
Args:
phased_address : str
Address of the phased bgen file (does not include '.bgen'). Only one of unphased_address and phased_address is necessary.
unphased_address : str
Address of the bed file (does not include '.bed'). Only one of unphased_address and phased_address is necessary.
bim: pandas.DataFrame
A dataframe with these columns(dtype str): Chr id morgans coordinate allele1 allele2
pedigree_output: np.array
Pedigree with added parental status.
ped_ids: set
Set of ids of individuals with missing parents.
chromosomes: str
A string containing all the chromosomes present in the data.
start : int, optional
This function can be used for preparing a slice of a chromosome. This is the location of the start of the slice.
end : int, optional
This function can be used for preparing a slice of a chromosome. This is the location of the end of the slice.
Returns:
tuple(np.array[signed char], np.array[signed char], str->int, np.array[int], np.array[float], dict)
phased_gts: np.array[signed char], optional
A three-dimensional array containing genotypes for all individuals, SNPs and, haplotypes respectively.
unphased_gts: np.array[signed char]
A two-dimensional array containing genotypes for all individuals and SNPs respectively.
iid_to_bed_index: str->int
A str->int dictionary mapping IIDs of people to their location in bed file.
pos: np.array[int]
A numpy array with the position of each SNP in the order of appearance in gts.
freqs: np.array[float]
Min allele frequency for all the SNPs present in the genotypes in that order.
hdf5_output_dict: dict
A dictionary whose values will be written in the imputation output under its keys.
"""
logging.info(f"with chromosomes {chromosomes} initializing gts data with start={start} end={end}")
phased_gts = None
unphased_gts = None
if unphased_address:
bim_as_csv = pd.read_csv(unphased_address+".bim", delim_whitespace=True, header=None)
'''
OptiSS tool for optimizing spatial joining of big social media data
Arcpy 2.6 and Python 3
Local app that optimizes the spatial join of social media posts with a regions layer.
Check the README file of the repository for more details on how it works.
Thanks to the methodological development of Vuokko Heikinheimo in the SOME project and the
method performance evaluation of <NAME>, this app helps researchers enrich
social media posting histories as a first step
for Origin (home) Detection (country, county, municipality)
Author: <NAME>/Digital Geography Lab
Project: Borderspace Project
Date: 10-2021
'''
import streamlit as st
import pandas as pd
import glob
import os
import sys
import time
from datetime import datetime
import geopandas as gpd
from pyproj import CRS
from shapely.geometry import Point
import matplotlib.pyplot as plt
# directory
homefolder = os.getcwd()
# __________________FUNCTIONS___________________
def data_list():
'''Function that gives back a list of [file paths, file names] in the app_data folder'''
files_list = glob.glob(os.path.join(homefolder, 'app_data/*.csv'))
names_list = [os.path.basename(file) for file in files_list]
return [files_list, names_list]
def read_data(selectbox_button):
'''Function that reads CSV file selected in selectionbox'''
path = os.path.join(homefolder, 'app_data/{}'.format(str(selectbox_button)))
dataset = pd.read_csv(path, sep=';')
return dataset
def read_geodata(selectbox_button):
'''Function that reads the spatial file selected in the selectbox from the regions_layer folder'''
path = os.path.join(homefolder, 'regions_layer/{}'.format(str(selectbox_button)))
dataset = gpd.read_file(path)
return dataset
def create_geomlist(dataframe, long_selection, lat_selection):
'''Function that gives back a list of geometries of input data'''
try:
geom_list = [Point(lon, lat) for lon, lat in
zip(dataframe[long_selection].to_list(),
dataframe[lat_selection].to_list())]
return geom_list
except:
st.warning('Select the correct columns!')
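# Example (illustrative, hypothetical column names): the returned list can be used to build a GeoDataFrame
# geoms = create_geomlist(data, 'lon', 'lat')
# gdf = gpd.GeoDataFrame(data, geometry=geoms, crs=CRS.from_epsg(4326))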
def plot_geodata(geodata_layer):
'''Function that plots the GADM layer - extra functionality'''
plt.style.use('seaborn')
fig, ax = plt.subplots()
geodata_layer.plot(ax=ax, facecolor="lightgray", edgecolor="black")
plt.axis('off')
return fig
# ______________________________________
st.title('Welcome to OptiSS 🧐')
st.header('Tool for optimizing spatial joining of big social media data')
st.write('Follow the next steps to optimize the spatial join process. The app comes with a sample dataset of Twitter posting history, but you can add your own social media datasets and regions. Check the instructions before using it.')
st.markdown('[How to use OptiSS?](https://github.com/DigitalGeographyLab/OptimizedSpatialJoin-tool)')
# 1. ______________________READ SOCIAL MEDIA DATASET_________________________
st.header('1. Upload your social media posting history dataset')
st.write('Dataset must be in **.csv** and delimited by **semicolons**')
filepath = st.selectbox('Files in <app_data> folder:', options=data_list()[1])
data = read_data(filepath)
columns = list(data.columns)
st.write('The **Timestamp** and **User ID** columns are used only for displaying info (optional columns)')
datacol1, datacol2, datacol3, datacol4 = st.columns(4)
time_col = datacol1.selectbox('Timestamp column', options=columns)
long_col = datacol2.selectbox('Longitude', options=columns)
lat_col = datacol3.selectbox('Latitude', options=columns)
userid_col = datacol4.selectbox('User ID', options=columns)
# DATA
data = pd.DataFrame(data[[time_col, long_col, lat_col, userid_col]])
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
:description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - not applied)
:param num_apps; number of applications
:param app_interval; number of days between applications
:NOTE in TED model there are two application scenarios per simulation (one for a min/max exposure scenario)
(this is why the parameters are passed in)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
result = pd.Series([[]], dtype='bool')
expected_results = [[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# input variables that change per simulation
ted_empty.num_apps_min = pd.Series([3, 5, 1])
ted_empty.app_interval_min = pd.Series([3, 7, 1])
for i in range (3):
result[i] = ted_empty.daily_app_flag(ted_empty.num_apps_min[i], ted_empty.app_interval_min[i])
np.array_equal(result[i],expected_results[i])
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
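    # Worked check for the first scenario above: with num_apps_min = 3 and app_interval_min = 3,
    # applications fall on days 0, 3 and 6, which is why only those positions of the 366-day
    # flag series are expected to be True.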
def test_set_drift_parameters(self):
"""
:description provides parameter values to use when calculating distances from the edge of the application source area to
concentration of interest
:param app_method; application method (aerial/ground/airblast)
:param boom_hgt; height of boom (low/high) - 'NA' if not ground application
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
:param param_a (result[i][0]); parameter a for spray drift distance calculation
:param param_b (result[i][1]); parameter b for spray drift distance calculation
:param param_c (result[i][2]); parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series(9*[[0.,0.,0.]], dtype='float')
expected_results = [[0.0292,0.822,0.6539],[0.043,1.03,0.5],[0.0721,1.0977,0.4999],[0.1014,1.1344,0.4999],
[1.0063,0.9998,1.0193],[5.5513,0.8523,1.0079],[0.1913,1.2366,1.0552],
[2.4154,0.9077,1.0128],[0.0351,2.4586,0.4763]]
try:
# input variables that change per simulation
ted_empty.app_method_min = pd.Series(['aerial','aerial','aerial','aerial','ground','ground','ground','ground','airblast'])
ted_empty.boom_hgt_min = pd.Series(['','','','','low','low','high','high',''])
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine','fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine','fine_to_medium-coarse','very_fine_to_fine','fine_to_medium-coarse',''])
for i in range (9): # test that the nine combinations are accessed
result[i][0], result[i][1], result[i][2] = ted_empty.set_drift_parameters(ted_empty.app_method_min[i], ted_empty.boom_hgt_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range (9):
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_drift_distance_calc(self):
"""
:description provides parameter values to use when calculating distances from the edge of the application source area to
concentration of interest
:param app_rate_frac; fraction of active ingredient application rate equivalent to the health threshold of concern
:param param_a; parameter a for spray drift distance calculation
:param param_b; parameter b for spray drift distance calculation
:param param_c; parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [302.050738, 11.484378, 0.0]
try:
# internal model constants
ted_empty.max_distance_from_source = 1000.
# input variable that is internally specified from among options
param_a = pd.Series([0.0292, 0.1913, 0.0351], dtype='float')
param_b = pd.Series([0.822, 1.2366, 2.4586], dtype='float')
param_c = pd.Series([0.6539, 1.0522, 0.4763], dtype='float')
# internally calculated variables
app_rate_frac = pd.Series([0.1,0.25,0.88], dtype='float')
for i in range(3):
result[i] = ted_empty.drift_distance_calc(app_rate_frac[i], param_a[i], param_b[i], param_c[i], ted_empty.max_distance_from_source)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
:description unittest for function conc_timestep:
:param conc_ini; initial concentration for day (actually previous day concentration)
:param half_life; halflife of pesticide representing either foliar dissipation halflife or aerobic soil metabolism halflife (days)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [9.803896e-4, 0.106066, 1.220703e-3]
try:
# input variable that is internally specified from among options
half_life = pd.Series([35., 2., .1])
# internally calculated variables
conc_ini = pd.Series([1.e-3, 0.15, 1.25])
result = ted_empty.conc_timestep(conc_ini, half_life)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
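    # Worked check: the expected values above correspond to one day of first-order decay,
    # conc_ini * 0.5**(1./half_life), e.g. 1.e-3 * 0.5**(1./35.) = 9.803896e-4,
    # 0.15 * 0.5**(1./2.) = 0.106066 and 1.25 * 0.5**(1./0.1) = 1.220703e-3.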
def test_conc_initial_canopy_air(self):
"""
:description calculates initial (1st application day) air concentration of pesticide within plant canopy (ug/mL)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param mass_pest; mass of pesticide on treated field (mg)
:param volume_air; volume of air in 1 hectare to a height equal to the height of the crop canopy
:param biotransfer_factor; the volume_based biotransfer factor; function of Henry's law constant and Log Kow
NOTE: this represents Eq 24 (and supporting eqs 25,26,27) of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.152526e-7, 1.281910e-5, 7.925148e-8]
try:
# internal model constants
ted_empty.hectare_to_acre = 2.47105
ted_empty.gms_to_mg = 1000.
ted_empty.lbs_to_gms = 453.592
ted_empty.crop_hgt = 1. #m
ted_empty.hectare_area = 10000. #m2
ted_empty.m3_to_liters = 1000.
ted_empty.mass_plant = 25000. # kg/hectare
ted_empty.density_plant = 0.77 #kg/L
# input variables that change per simulation
ted_empty.log_kow = pd.Series([2., 4., 6.], dtype='float')
ted_empty.log_unitless_hlc = pd.Series([-5., -3., -4.], dtype='float')
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_canopy_air(i, ted_empty.app_rate_min[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_soil_h2o(self):
"""
:description calculates initial (1st application day) concentration in soil pore water or surface puddles(ug/L)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param soil_depth
:param soil_bulk_density; kg/L
:param porosity; soil porosity
:param frac_org_cont_soil; fraction organic carbon in soil
:param app_rate_conv; conversion factor used to convert units of application rate (lbs a.i./acre) to (ug a.i./mL)
:NOTE this represents Eq 3 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
(the depth of water in this equation is assumed to be 0.0 and therefore not included here)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [5.067739e-3, 1.828522, 6.13194634]
try:
# internal model constants
ted_empty.app_rate_conv1 = 11.2
ted_empty.soil_depth = 2.6 # cm
ted_empty.soil_porosity = 0.35
ted_empty.soil_bulk_density = 1.5 # kg/L
ted_empty.soil_foc = 0.015
ted_empty.h2o_depth_soil = 0.0
ted_empty.h2o_depth_puddles = 1.3
# internally specified variable
ted_empty.water_type = pd.Series(["puddles", "pore_water", "puddles"])
# input variables that change per simulation
ted_empty.koc = pd.Series([1.e-3, 0.15, 1.25])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_soil_h2o(i, ted_empty.app_rate_min[i], ted_empty.water_type[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_plant(self):
"""
:description calculates initial (1st application day) dietary based EEC (residue concentration) from pesticide application
(mg/kg-diet for food items including short/tall grass, broadleaf plants, seeds/fruit/pods, and above ground arthropods)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.5e-2, 22.5, 300.]
try:
# input variables that change per simulation
ted_empty.food_multiplier = pd.Series([15., 150., 240.])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
result = ted_empty.conc_initial_plant(ted_empty.app_rate_min, ted_empty.food_multiplier)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
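    # Worked check: the expected values above equal app_rate_min * food_multiplier,
    # e.g. 1.e-3 * 15. = 1.5e-2, 0.15 * 150. = 22.5 and 1.25 * 240. = 300.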
def test_animal_dietary_intake(self):
"""
:description generates pesticide intake via consumption of diet containing pesticide for animals (mammals, birds, amphibians, reptiles)
:param a1; coefficient of allometric expression
:param b1; exponent of allometric expression
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
# this represents Eqs 6 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [8.050355, 3.507997, 64.92055]
try:
# internally specified parameters
a1 = pd.Series([.398, .013, .621], dtype='float')
b1 = pd.Series([.850, .773, .564], dtype='float')
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
frac_h2o = pd.Series([0.65, 0.85, 0.7], dtype='float')
result = ted_empty.animal_dietary_intake(a1, b1, body_wgt, frac_h2o)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
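    # Worked check: the expected values above are consistent with the allometric intake expression
    # a1 * body_wgt**b1 / (1. - frac_h2o), e.g. 0.398 * 10.**0.850 / (1. - 0.65) = 8.050355.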
def test_animal_dietary_dose(self):
"""
:description generates pesticide dietary-based dose for animals (mammals, birds, amphibians, reptiles)
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
:param food_intake_rate; ingestion rate of food item (g/day-ww)
:param food_pest_conc; pesticide concentration in food item (mg a.i./kg)
# this represents Eqs 5 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [3.e-4, 3.45e-2, 4.5]
try:
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
# internally calculated variables
food_intake_rate = pd.Series([3., 12., 45.], dtype='float')
food_pest_conc = pd.Series([1.e-3, 3.45e-1, 4.50e+1], dtype='float')
result = ted_empty.animal_dietary_dose(body_wgt, food_intake_rate, food_pest_conc)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
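    # Worked check: the expected values above equal food_intake_rate * food_pest_conc / body_wgt,
    # e.g. 3. * 1.e-3 / 10. = 3.e-4 and 45. * 4.50e+1 / 450. = 4.5.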
def test_daily_plant_timeseries(self):
"""
:description generates annual timeseries of daily pesticide residue concentration (EECs) for a food item
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar specific
# association, rather it is one year from the day of 1st pesticide application
#expected results generated by running OPP spreadsheet with appropriate inputs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.700000E+00,2.578072E+00,2.461651E+00,5.050487E+00,4.822415E+00,4.604642E+00,7.096704E+00,
6.776228E+00,6.470225E+00,6.178040E+00,5.899049E+00,5.632658E+00,5.378296E+00,5.135421E+00,
4.903513E+00,4.682078E+00,4.470643E+00,4.268756E+00,4.075986E+00,3.891921E+00,3.716168E+00,
3.548352E+00,3.388114E+00,3.235112E+00,3.089020E+00,2.949525E+00,2.816329E+00,2.689148E+00,
2.567710E+00,2.451757E+00,2.341039E+00,2.235322E+00,2.134378E+00,2.037993E+00,1.945961E+00,
1.858084E+00,1.774176E+00,1.694057E+00,1.617556E+00,1.544510E+00,1.474762E+00,1.408164E+00,
1.344574E+00,1.283855E+00,1.225878E+00,1.170520E+00,1.117661E+00,1.067189E+00,1.018997E+00,
9.729803E-01,9.290420E-01,8.870880E-01,8.470285E-01,8.087781E-01,7.722549E-01,7.373812E-01,
7.040822E-01,6.722870E-01,6.419276E-01,6.129392E-01,5.852598E-01,5.588304E-01,5.335945E-01,
5.094983E-01,4.864901E-01,4.645210E-01,4.435440E-01,4.235143E-01,4.043890E-01,3.861275E-01,
3.686906E-01,3.520411E-01,3.361435E-01,3.209638E-01,3.064696E-01,2.926299E-01,2.794152E-01,
2.667973E-01,2.547491E-01,2.432451E-01,2.322605E-01,2.217720E-01,2.117571E-01,2.021945E-01,
1.930637E-01,1.843453E-01,1.760206E-01,1.680717E-01,1.604819E-01,1.532348E-01,1.463150E-01,
1.397076E-01,1.333986E-01,1.273746E-01,1.216225E-01,1.161303E-01,1.108860E-01,1.058786E-01,
1.010973E-01,9.653187E-02,9.217264E-02,8.801028E-02,8.403587E-02,8.024095E-02,7.661739E-02,
7.315748E-02,6.985380E-02,6.669932E-02,6.368728E-02,6.081127E-02,5.806513E-02,5.544300E-02,
5.293928E-02,5.054863E-02,4.826593E-02,4.608632E-02,4.400514E-02,4.201794E-02,4.012047E-02,
3.830870E-02,3.657874E-02,3.492690E-02,3.334966E-02,3.184364E-02,3.040563E-02,2.903256E-02,
2.772150E-02,2.646964E-02,2.527431E-02,2.413297E-02,2.304316E-02,2.200257E-02,2.100897E-02,
2.006024E-02,1.915435E-02,1.828937E-02,1.746345E-02,1.667483E-02,1.592182E-02,1.520282E-02,
1.451628E-02,1.386075E-02,1.323482E-02,1.263716E-02,1.206648E-02,1.152158E-02,1.100128E-02,
1.050448E-02,1.003012E-02,9.577174E-03,9.144684E-03,8.731725E-03,8.337415E-03,7.960910E-03,
7.601408E-03,7.258141E-03,6.930375E-03,6.617410E-03,6.318579E-03,6.033242E-03,5.760790E-03,
5.500642E-03,5.252242E-03,5.015059E-03,4.788587E-03,4.572342E-03,4.365863E-03,4.168707E-03,
3.980455E-03,3.800704E-03,3.629070E-03,3.465187E-03,3.308705E-03,3.159289E-03,3.016621E-03,
2.880395E-03,2.750321E-03,2.626121E-03,2.507530E-03,2.394294E-03,2.286171E-03,2.182931E-03,
2.084354E-03,1.990228E-03,1.900352E-03,1.814535E-03,1.732594E-03,1.654353E-03,1.579645E-03,
1.508310E-03,1.440198E-03,1.375161E-03,1.313061E-03,1.253765E-03,1.197147E-03,1.143086E-03,
1.091466E-03,1.042177E-03,9.951138E-04,9.501760E-04,9.072676E-04,8.662969E-04,8.271763E-04,
7.898223E-04,7.541552E-04,7.200988E-04,6.875803E-04,6.565303E-04,6.268824E-04,5.985734E-04,
5.715428E-04,5.457328E-04,5.210884E-04,4.975569E-04,4.750880E-04,4.536338E-04,4.331484E-04,
4.135881E-04,3.949112E-04,3.770776E-04,3.600494E-04,3.437901E-04,3.282651E-04,3.134412E-04,
2.992867E-04,2.857714E-04,2.728664E-04,2.605442E-04,2.487784E-04,2.375440E-04,2.268169E-04,
2.165742E-04,2.067941E-04,1.974556E-04,1.885388E-04,1.800247E-04,1.718951E-04,1.641326E-04,
1.567206E-04,1.496433E-04,1.428857E-04,1.364332E-04,1.302721E-04,1.243892E-04,1.187720E-04,
1.134085E-04,1.082871E-04,1.033970E-04,9.872779E-05,9.426940E-05,9.001235E-05,8.594753E-05,
8.206628E-05,7.836030E-05,7.482167E-05,7.144285E-05,6.821660E-05,6.513605E-05,6.219461E-05,
5.938600E-05,5.670423E-05,5.414355E-05,5.169852E-05,4.936390E-05,4.713470E-05,4.500617E-05,
4.297377E-05,4.103314E-05,3.918015E-05,3.741084E-05,3.572142E-05,3.410830E-05,3.256803E-05,
3.109731E-05,2.969300E-05,2.835211E-05,2.707178E-05,2.584926E-05,2.468195E-05,2.356735E-05,
2.250309E-05,2.148688E-05,2.051657E-05,1.959007E-05,1.870542E-05,1.786071E-05,1.705415E-05,
1.628401E-05,1.554865E-05,1.484650E-05,1.417606E-05,1.353589E-05,1.292463E-05,1.234097E-05,
1.178368E-05,1.125154E-05,1.074344E-05,1.025829E-05,9.795037E-06,9.352709E-06,8.930356E-06,
8.527075E-06,8.142006E-06,7.774326E-06,7.423250E-06,7.088028E-06,6.767944E-06,6.462315E-06,
6.170487E-06,5.891838E-06,5.625772E-06,5.371721E-06,5.129143E-06,4.897519E-06,4.676355E-06,
4.465178E-06,4.263538E-06,4.071003E-06,3.887163E-06,3.711625E-06,3.544014E-06,3.383972E-06,
3.231157E-06,3.085243E-06,2.945919E-06,2.812886E-06,2.685860E-06,2.564571E-06,2.448759E-06,
2.338177E-06,2.232589E-06,2.131769E-06,2.035502E-06,1.943582E-06,1.855813E-06,1.772007E-06,
1.691986E-06,1.615579E-06,1.542622E-06,1.472959E-06,1.406443E-06,1.342930E-06,1.282286E-06,
1.224380E-06,1.169089E-06,1.116294E-06,1.065884E-06,1.017751E-06,9.717908E-07,9.279063E-07,
8.860035E-07,8.459930E-07,8.077893E-07,7.713109E-07,7.364797E-07,7.032215E-07,6.714651E-07,
6.411428E-07,6.121898E-07,5.845443E-07,5.581472E-07,5.329422E-07,5.088754E-07,4.858954E-07,
4.639531E-07,4.430018E-07],
[5.500000E+01,5.349602E+01,5.203317E+01,5.061032E+01,4.922638E+01,4.788028E+01,4.657099E+01,
1.002975E+02,9.755487E+01,9.488722E+01,9.229253E+01,8.976878E+01,8.731405E+01,8.492644E+01,
1.376041E+02,1.338413E+02,1.301814E+02,1.266216E+02,1.231591E+02,1.197913E+02,1.165156E+02,
1.683295E+02,1.637265E+02,1.592494E+02,1.548947E+02,1.506591E+02,1.465394E+02,1.425322E+02,
1.936347E+02,1.883397E+02,1.831896E+02,1.781802E+02,1.733079E+02,1.685688E+02,1.639593E+02,
1.594758E+02,1.551149E+02,1.508733E+02,1.467476E+02,1.427348E+02,1.388317E+02,1.350354E+02,
1.313428E+02,1.277512E+02,1.242579E+02,1.208600E+02,1.175551E+02,1.143406E+02,1.112139E+02,
1.081728E+02,1.052148E+02,1.023377E+02,9.953925E+01,9.681734E+01,9.416987E+01,9.159479E+01,
8.909012E+01,8.665395E+01,8.428439E+01,8.197963E+01,7.973789E+01,7.755746E+01,7.543664E+01,
7.337382E+01,7.136741E+01,6.941587E+01,6.751769E+01,6.567141E+01,6.387562E+01,6.212894E+01,
6.043002E+01,5.877756E+01,5.717028E+01,5.560696E+01,5.408638E+01,5.260739E+01,5.116884E+01,
4.976962E+01,4.840867E+01,4.708493E+01,4.579739E+01,4.454506E+01,4.332697E+01,4.214220E+01,
4.098981E+01,3.986895E+01,3.877873E+01,3.771832E+01,3.668691E+01,3.568371E+01,3.470793E+01,
3.375884E+01,3.283571E+01,3.193781E+01,3.106447E+01,3.021501E+01,2.938878E+01,2.858514E+01,
2.780348E+01,2.704319E+01,2.630369E+01,2.558442E+01,2.488481E+01,2.420434E+01,2.354247E+01,
2.289870E+01,2.227253E+01,2.166349E+01,2.107110E+01,2.049491E+01,1.993447E+01,1.938936E+01,
1.885916E+01,1.834346E+01,1.784185E+01,1.735397E+01,1.687942E+01,1.641785E+01,1.596891E+01,
1.553224E+01,1.510751E+01,1.469439E+01,1.429257E+01,1.390174E+01,1.352160E+01,1.315185E+01,
1.279221E+01,1.244241E+01,1.210217E+01,1.177123E+01,1.144935E+01,1.113627E+01,1.083174E+01,
1.053555E+01,1.024745E+01,9.967237E+00,9.694682E+00,9.429580E+00,9.171728E+00,8.920927E+00,
8.676983E+00,8.439711E+00,8.208926E+00,7.984453E+00,7.766118E+00,7.553753E+00,7.347195E+00,
7.146286E+00,6.950870E+00,6.760798E+00,6.575924E+00,6.396105E+00,6.221203E+00,6.051084E+00,
5.885617E+00,5.724674E+00,5.568133E+00,5.415872E+00,5.267774E+00,5.123727E+00,4.983618E+00,
4.847341E+00,4.714790E+00,4.585864E+00,4.460463E+00,4.338492E+00,4.219855E+00,4.104463E+00,
3.992226E+00,3.883059E+00,3.776876E+00,3.673597E+00,3.573143E+00,3.475435E+00,3.380399E+00,
3.287962E+00,3.198052E+00,3.110601E+00,3.025542E+00,2.942808E+00,2.862337E+00,2.784066E+00,
2.707936E+00,2.633887E+00,2.561863E+00,2.491809E+00,2.423670E+00,2.357395E+00,2.292932E+00,
2.230232E+00,2.169246E+00,2.109928E+00,2.052232E+00,1.996113E+00,1.941529E+00,1.888438E+00,
1.836799E+00,1.786571E+00,1.737718E+00,1.690200E+00,1.643981E+00,1.599026E+00,1.555301E+00,
1.512771E+00,1.471404E+00,1.431169E+00,1.392033E+00,1.353968E+00,1.316944E+00,1.280932E+00,
1.245905E+00,1.211835E+00,1.178698E+00,1.146466E+00,1.115116E+00,1.084623E+00,1.054964E+00,
1.026116E+00,9.980566E-01,9.707647E-01,9.442191E-01,9.183994E-01,8.932857E-01,8.688588E-01,
8.450998E-01,8.219905E-01,7.995131E-01,7.776504E-01,7.563855E-01,7.357021E-01,7.155843E-01,
6.960166E-01,6.769840E-01,6.584718E-01,6.404659E-01,6.229523E-01,6.059176E-01,5.893488E-01,
5.732330E-01,5.575579E-01,5.423115E-01,5.274819E-01,5.130579E-01,4.990283E-01,4.853824E-01,
4.721095E-01,4.591997E-01,4.466428E-01,4.344294E-01,4.225499E-01,4.109952E-01,3.997565E-01,
3.888252E-01,3.781927E-01,3.678510E-01,3.577921E-01,3.480083E-01,3.384920E-01,3.292359E-01,
3.202329E-01,3.114761E-01,3.029588E-01,2.946744E-01,2.866165E-01,2.787790E-01,2.711557E-01,
2.637410E-01,2.565290E-01,2.495142E-01,2.426912E-01,2.360548E-01,2.295998E-01,2.233214E-01,
2.172147E-01,2.112749E-01,2.054976E-01,1.998783E-01,1.944126E-01,1.890964E-01,1.839255E-01,
1.788961E-01,1.740041E-01,1.692460E-01,1.646180E-01,1.601165E-01,1.557381E-01,1.514794E-01,
1.473372E-01,1.433082E-01,1.393895E-01,1.355779E-01,1.318705E-01,1.282645E-01,1.247571E-01,
1.213456E-01,1.180274E-01,1.147999E-01,1.116607E-01,1.086073E-01,1.056375E-01,1.027488E-01,
9.993914E-02,9.720630E-02,9.454818E-02,9.196276E-02,8.944803E-02,8.700207E-02,8.462300E-02,
8.230898E-02,8.005823E-02,7.786904E-02,7.573970E-02,7.366860E-02,7.165412E-02,6.969474E-02,
6.778893E-02,6.593524E-02,6.413224E-02,6.237854E-02,6.067279E-02,5.901369E-02,5.739996E-02,
5.583036E-02,5.430367E-02,5.281874E-02,5.137440E-02,4.996957E-02,4.860315E-02,4.727409E-02,
4.598138E-02,4.472402E-02,4.350104E-02,4.231150E-02,4.115449E-02,4.002912E-02,3.893452E-02,
3.786985E-02,3.683430E-02,3.582706E-02,3.484737E-02,3.389447E-02,3.296762E-02,3.206612E-02,
3.118927E-02,3.033640E-02,2.950685E-02,2.869998E-02,2.791518E-02,2.715184E-02,2.640937E-02,
2.568720E-02,2.498478E-02,2.430157E-02,2.363705E-02,2.299069E-02,2.236201E-02,2.175052E-02,
2.115575E-02,2.057724E-02,2.001456E-02,1.946726E-02,1.893493E-02,1.841715E-02,1.791353E-02,
1.742368E-02,1.694723E-02],
[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,2.717171E+02,2.663889E+02,
2.611652E+02,2.560439E+02,2.510230E+02,2.461006E+02,2.412747E+02,2.365435E+02,2.319050E+02,
2.273575E+02,2.228991E+02,2.185282E+02,2.142430E+02,2.100418E+02,2.059231E+02,2.018850E+02,
1.979262E+02,1.940450E+02,1.902399E+02,1.865094E+02,1.828520E+02,1.792664E+02,1.757511E+02,
1.723048E+02,1.689260E+02,1.656134E+02,1.623658E+02,1.591820E+02,1.560605E+02,1.530002E+02,
1.500000E+02,1.470586E+02,1.441749E+02,1.413477E+02,1.385759E+02,1.358585E+02,1.331944E+02,
1.305826E+02,1.280219E+02,1.255115E+02,1.230503E+02,1.206374E+02,1.182717E+02,1.159525E+02,
1.136787E+02,1.114496E+02,1.092641E+02,1.071215E+02,1.050209E+02,1.029615E+02,1.009425E+02,
9.896309E+01,9.702249E+01,9.511994E+01,9.325469E+01,9.142602E+01,8.963322E+01,8.787556E+01,
8.615238E+01,8.446298E+01,8.280671E+01,8.118292E+01,7.959098E+01,7.803025E+01,7.650012E+01,
7.500000E+01,7.352930E+01,7.208743E+01,7.067384E+01,6.928797E+01,6.792927E+01,6.659722E+01,
6.529129E+01,6.401097E+01,6.275575E+01,6.152515E+01,6.031868E+01,5.913587E+01,5.797625E+01,
5.683937E+01,5.572479E+01,5.463206E+01,5.356076E+01,5.251046E+01,5.148076E+01,5.047126E+01,
4.948155E+01,4.851124E+01,4.755997E+01,4.662735E+01,4.571301E+01,4.481661E+01,4.393778E+01,
4.307619E+01,4.223149E+01,4.140336E+01,4.059146E+01,3.979549E+01,3.901512E+01,3.825006E+01,
3.750000E+01,3.676465E+01,3.604372E+01,3.533692E+01,3.464398E+01,3.396464E+01,3.329861E+01,
3.264565E+01,3.200548E+01,3.137788E+01,3.076258E+01,3.015934E+01,2.956793E+01,2.898813E+01,
2.841969E+01,2.786239E+01,2.731603E+01,2.678038E+01,2.625523E+01,2.574038E+01,2.523563E+01,
2.474077E+01,2.425562E+01,2.377998E+01,2.331367E+01,2.285651E+01,2.240830E+01,2.196889E+01,
2.153809E+01,2.111575E+01,2.070168E+01,2.029573E+01,1.989774E+01,1.950756E+01,1.912503E+01,
1.875000E+01,1.838232E+01,1.802186E+01,1.766846E+01,1.732199E+01,1.698232E+01,1.664931E+01,
1.632282E+01,1.600274E+01,1.568894E+01,1.538129E+01,1.507967E+01,1.478397E+01,1.449406E+01,
1.420984E+01,1.393120E+01,1.365801E+01,1.339019E+01,1.312762E+01,1.287019E+01,1.261781E+01,
1.237039E+01,1.212781E+01,1.188999E+01,1.165684E+01,1.142825E+01,1.120415E+01,1.098445E+01,
1.076905E+01,1.055787E+01,1.035084E+01,1.014787E+01,9.948872E+00,9.753781E+00,9.562515E+00,
9.375000E+00,9.191162E+00,9.010929E+00,8.834230E+00,8.660996E+00,8.491159E+00,8.324653E+00,
8.161412E+00,8.001371E+00,7.844469E+00,7.690644E+00,7.539835E+00,7.391984E+00,7.247031E+00,
7.104921E+00,6.965598E+00,6.829007E+00,6.695094E+00,6.563808E+00,6.435095E+00,6.308907E+00,
6.185193E+00,6.063905E+00,5.944996E+00,5.828418E+00,5.714127E+00,5.602076E+00,5.492223E+00,
5.384524E+00,5.278936E+00,5.175420E+00,5.073933E+00,4.974436E+00,4.876890E+00,4.781258E+00,
4.687500E+00,4.595581E+00,4.505464E+00,4.417115E+00,4.330498E+00,4.245580E+00,4.162326E+00,
4.080706E+00,4.000686E+00,3.922235E+00,3.845322E+00,3.769918E+00,3.695992E+00,3.623516E+00,
3.552461E+00,3.482799E+00,3.414504E+00,3.347547E+00,3.281904E+00,3.217548E+00,3.154454E+00,
3.092597E+00,3.031953E+00,2.972498E+00,2.914209E+00,2.857063E+00,2.801038E+00,2.746111E+00,
2.692262E+00,2.639468E+00,2.587710E+00,2.536966E+00,2.487218E+00,2.438445E+00,2.390629E+00,
2.343750E+00,2.297790E+00,2.252732E+00,2.208558E+00,2.165249E+00,2.122790E+00,2.081163E+00,
2.040353E+00,2.000343E+00,1.961117E+00,1.922661E+00,1.884959E+00,1.847996E+00,1.811758E+00,
1.776230E+00,1.741400E+00,1.707252E+00,1.673774E+00,1.640952E+00,1.608774E+00,1.577227E+00,
1.546298E+00,1.515976E+00,1.486249E+00,1.457105E+00,1.428532E+00,1.400519E+00,1.373056E+00,
1.346131E+00,1.319734E+00,1.293855E+00,1.268483E+00,1.243609E+00,1.219223E+00,1.195314E+00,
1.171875E+00,1.148895E+00,1.126366E+00,1.104279E+00,1.082625E+00,1.061395E+00,1.040582E+00,
1.020176E+00,1.000171E+00,9.805587E-01,9.613305E-01,9.424794E-01,9.239979E-01,9.058789E-01,
8.881152E-01,8.706998E-01,8.536259E-01,8.368868E-01,8.204760E-01,8.043869E-01,7.886134E-01,
7.731492E-01,7.579882E-01,7.431245E-01,7.285523E-01,7.142658E-01,7.002595E-01,6.865278E-01,
6.730654E-01,6.598670E-01,6.469274E-01,6.342416E-01,6.218045E-01,6.096113E-01,5.976572E-01,
5.859375E-01,5.744476E-01,5.631831E-01,5.521394E-01,5.413123E-01,5.306975E-01,5.202908E-01,
5.100882E-01,5.000857E-01,4.902793E-01,4.806652E-01,4.712397E-01,4.619990E-01,4.529395E-01,
4.440576E-01,4.353499E-01,4.268129E-01,4.184434E-01,4.102380E-01,4.021935E-01,3.943067E-01,
3.865746E-01,3.789941E-01,3.715622E-01,3.642761E-01,3.571329E-01,3.501297E-01,3.432639E-01,
3.365327E-01,3.299335E-01,3.234637E-01,3.171208E-01,3.109023E-01,3.048056E-01,2.988286E-01,
2.929687E-01,2.872238E-01,2.815915E-01,2.760697E-01,2.706561E-01,2.653487E-01,2.601454E-01,
2.550441E-01,2.500429E-01,2.451397E-01,2.403326E-01,2.356198E-01,2.309995E-01,2.264697E-01,
2.220288E-01,2.176749E-01]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# internally specified variable (from internal database)
food_multiplier = pd.Series([15., 110., 240.])
# input variables that change per simulation
ted_empty.foliar_diss_hlife = pd.Series([15., 25., 35.])
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
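            # Sanity note (inferred from the expected arrays above, not from the model source):
            # daily_plant_timeseries appears to add app_rate * food_multiplier on every flagged
            # application day (e.g. 1.25 * 240 = 300 on day 0 of the third scenario) and to decay
            # the running concentration by exp(-ln2 / foliar_diss_hlife) on each following day,
            # which is why the single-application series halves once per foliar_diss_hlife days.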
for i in range(3):
result[i] = ted_empty.daily_plant_timeseries(i, ted_empty.app_rate_min[i], food_multiplier[i], daily_flag[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_h2o_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil pore water and surface puddles
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:param water_type; type of water (pore water or surface puddles)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
               # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
               # association, rather it is one year from the day of the 1st pesticide application
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.235571E-02,2.134616E-02,2.038220E-02,4.181749E-02,3.992908E-02,3.812594E-02,
5.875995E-02,5.610644E-02,5.357277E-02,5.115350E-02,4.884349E-02,4.663780E-02,
4.453171E-02,4.252073E-02,4.060056E-02,3.876711E-02,3.701645E-02,3.534484E-02,
3.374873E-02,3.222469E-02,3.076947E-02,2.937997E-02,2.805322E-02,2.678638E-02,
2.557675E-02,2.442175E-02,2.331890E-02,2.226586E-02,2.126037E-02,2.030028E-02,
1.938355E-02,1.850822E-02,1.767242E-02,1.687436E-02,1.611234E-02,1.538474E-02,
1.468999E-02,1.402661E-02,1.339319E-02,1.278838E-02,1.221087E-02,1.165945E-02,
1.113293E-02,1.063018E-02,1.015014E-02,9.691777E-03,9.254112E-03,8.836211E-03,
8.437182E-03,8.056172E-03,7.692368E-03,7.344993E-03,7.013305E-03,6.696596E-03,
6.394188E-03,6.105437E-03,5.829725E-03,5.566464E-03,5.315091E-03,5.075070E-03,
4.845888E-03,4.627056E-03,4.418105E-03,4.218591E-03,4.028086E-03,3.846184E-03,
3.672497E-03,3.506653E-03,3.348298E-03,3.197094E-03,3.052718E-03,2.914863E-03,
2.783232E-03,2.657546E-03,2.537535E-03,2.422944E-03,2.313528E-03,2.209053E-03,
2.109295E-03,2.014043E-03,1.923092E-03,1.836248E-03,1.753326E-03,1.674149E-03,
1.598547E-03,1.526359E-03,1.457431E-03,1.391616E-03,1.328773E-03,1.268768E-03,
1.211472E-03,1.156764E-03,1.104526E-03,1.054648E-03,1.007022E-03,9.615460E-04,
9.181242E-04,8.766632E-04,8.370745E-04,7.992735E-04,7.631796E-04,7.287156E-04,
6.958080E-04,6.643864E-04,6.343838E-04,6.057361E-04,5.783820E-04,5.522632E-04,
5.273239E-04,5.035108E-04,4.807730E-04,4.590621E-04,4.383316E-04,4.185372E-04,
3.996368E-04,3.815898E-04,3.643578E-04,3.479040E-04,3.321932E-04,3.171919E-04,
3.028680E-04,2.891910E-04,2.761316E-04,2.636619E-04,2.517554E-04,2.403865E-04,
2.295310E-04,2.191658E-04,2.092686E-04,1.998184E-04,1.907949E-04,1.821789E-04,
1.739520E-04,1.660966E-04,1.585960E-04,1.514340E-04,1.445955E-04,1.380658E-04,
1.318310E-04,1.258777E-04,1.201933E-04,1.147655E-04,1.095829E-04,1.046343E-04,
9.990919E-05,9.539745E-05,9.108945E-05,8.697600E-05,8.304830E-05,7.929798E-05,
7.571701E-05,7.229775E-05,6.903290E-05,6.591548E-05,6.293885E-05,6.009663E-05,
5.738276E-05,5.479145E-05,5.231715E-05,4.995459E-05,4.769873E-05,4.554473E-05,
4.348800E-05,4.152415E-05,3.964899E-05,3.785850E-05,3.614887E-05,3.451645E-05,
3.295774E-05,3.146942E-05,3.004831E-05,2.869138E-05,2.739572E-05,2.615858E-05,
2.497730E-05,2.384936E-05,2.277236E-05,2.174400E-05,2.076208E-05,1.982449E-05,
1.892925E-05,1.807444E-05,1.725822E-05,1.647887E-05,1.573471E-05,1.502416E-05,
1.434569E-05,1.369786E-05,1.307929E-05,1.248865E-05,1.192468E-05,1.138618E-05,
1.087200E-05,1.038104E-05,9.912247E-06,9.464626E-06,9.037219E-06,8.629112E-06,
8.239435E-06,7.867356E-06,7.512079E-06,7.172845E-06,6.848931E-06,6.539644E-06,
6.244324E-06,5.962341E-06,5.693091E-06,5.436000E-06,5.190519E-06,4.956124E-06,
4.732313E-06,4.518609E-06,4.314556E-06,4.119718E-06,3.933678E-06,3.756039E-06,
3.586423E-06,3.424465E-06,3.269822E-06,3.122162E-06,2.981170E-06,2.846545E-06,
2.718000E-06,2.595260E-06,2.478062E-06,2.366156E-06,2.259305E-06,2.157278E-06,
2.059859E-06,1.966839E-06,1.878020E-06,1.793211E-06,1.712233E-06,1.634911E-06,
1.561081E-06,1.490585E-06,1.423273E-06,1.359000E-06,1.297630E-06,1.239031E-06,
1.183078E-06,1.129652E-06,1.078639E-06,1.029929E-06,9.834195E-07,9.390098E-07,
8.966056E-07,8.561164E-07,8.174555E-07,7.805405E-07,7.452926E-07,7.116364E-07,
6.795000E-07,6.488149E-07,6.195154E-07,5.915391E-07,5.648262E-07,5.393195E-07,
5.149647E-07,4.917097E-07,4.695049E-07,4.483028E-07,4.280582E-07,4.087278E-07,
3.902703E-07,3.726463E-07,3.558182E-07,3.397500E-07,3.244074E-07,3.097577E-07,
2.957696E-07,2.824131E-07,2.696598E-07,2.574824E-07,2.458549E-07,2.347525E-07,
2.241514E-07,2.140291E-07,2.043639E-07,1.951351E-07,1.863231E-07,1.779091E-07,
1.698750E-07,1.622037E-07,1.548789E-07,1.478848E-07,1.412065E-07,1.348299E-07,
1.287412E-07,1.229274E-07,1.173762E-07,1.120757E-07,1.070145E-07,1.021819E-07,
9.756757E-08,9.316157E-08,8.895455E-08,8.493750E-08,8.110186E-08,7.743943E-08,
7.394239E-08,7.060327E-08,6.741494E-08,6.437059E-08,6.146372E-08,5.868811E-08,
5.603785E-08,5.350727E-08,5.109097E-08,4.878378E-08,4.658079E-08,4.447727E-08,
4.246875E-08,4.055093E-08,3.871971E-08,3.697119E-08,3.530163E-08,3.370747E-08,
3.218529E-08,3.073186E-08,2.934406E-08,2.801893E-08,2.675364E-08,2.554549E-08,
2.439189E-08,2.329039E-08,2.223864E-08,2.123438E-08,2.027546E-08,1.935986E-08,
1.848560E-08,1.765082E-08,1.685373E-08,1.609265E-08,1.536593E-08,1.467203E-08,
1.400946E-08,1.337682E-08,1.277274E-08,1.219595E-08,1.164520E-08,1.111932E-08,
1.061719E-08,1.013773E-08,9.679929E-09,9.242799E-09,8.825409E-09,8.426867E-09,
8.046324E-09,7.682965E-09,7.336014E-09,7.004732E-09,6.688409E-09,6.386371E-09,
6.097973E-09,5.822598E-09,5.559659E-09,5.308594E-09,5.068866E-09,4.839964E-09,
4.621399E-09,4.412704E-09,4.213434E-09,4.023162E-09,3.841482E-09,3.668007E-09],
[9.391514E-02,8.762592E-02,8.175787E-02,7.628279E-02,7.117436E-02,6.640803E-02,
6.196088E-02,1.517267E-01,1.415660E-01,1.320858E-01,1.232404E-01,1.149873E-01,
1.072870E-01,1.001023E-01,1.873139E-01,1.747700E-01,1.630662E-01,1.521461E-01,
1.419574E-01,1.324509E-01,1.235811E-01,2.092203E-01,1.952095E-01,1.821369E-01,
1.699397E-01,1.585594E-01,1.479411E-01,1.380340E-01,2.227054E-01,2.077915E-01,
1.938763E-01,1.808930E-01,1.687791E-01,1.574765E-01,1.469307E-01,1.370912E-01,
1.279106E-01,1.193449E-01,1.113527E-01,1.038957E-01,9.693814E-02,9.044648E-02,
8.438955E-02,7.873824E-02,7.346537E-02,6.854562E-02,6.395532E-02,5.967242E-02,
5.567634E-02,5.194786E-02,4.846907E-02,4.522324E-02,4.219478E-02,3.936912E-02,
3.673269E-02,3.427281E-02,3.197766E-02,2.983621E-02,2.783817E-02,2.597393E-02,
2.423454E-02,2.261162E-02,2.109739E-02,1.968456E-02,1.836634E-02,1.713640E-02,
1.598883E-02,1.491811E-02,1.391909E-02,1.298697E-02,1.211727E-02,1.130581E-02,
1.054869E-02,9.842280E-03,9.183172E-03,8.568202E-03,7.994415E-03,7.459053E-03,
6.959543E-03,6.493483E-03,6.058634E-03,5.652905E-03,5.274347E-03,4.921140E-03,
4.591586E-03,4.284101E-03,3.997208E-03,3.729527E-03,3.479771E-03,3.246741E-03,
3.029317E-03,2.826453E-03,2.637174E-03,2.460570E-03,2.295793E-03,2.142051E-03,
1.998604E-03,1.864763E-03,1.739886E-03,1.623371E-03,1.514658E-03,1.413226E-03,
1.318587E-03,1.230285E-03,1.147896E-03,1.071025E-03,9.993019E-04,9.323816E-04,
8.699428E-04,8.116854E-04,7.573292E-04,7.066131E-04,6.592934E-04,6.151425E-04,
5.739482E-04,5.355126E-04,4.996509E-04,4.661908E-04,4.349714E-04,4.058427E-04,
3.786646E-04,3.533066E-04,3.296467E-04,3.075712E-04,2.869741E-04,2.677563E-04,
2.498255E-04,2.330954E-04,2.174857E-04,2.029213E-04,1.893323E-04,1.766533E-04,
1.648233E-04,1.537856E-04,1.434871E-04,1.338782E-04,1.249127E-04,1.165477E-04,
1.087429E-04,1.014607E-04,9.466615E-05,8.832664E-05,8.241167E-05,7.689281E-05,
7.174353E-05,6.693908E-05,6.245637E-05,5.827385E-05,5.437143E-05,5.073034E-05,
4.733308E-05,4.416332E-05,4.120584E-05,3.844640E-05,3.587176E-05,3.346954E-05,
3.122818E-05,2.913693E-05,2.718571E-05,2.536517E-05,2.366654E-05,2.208166E-05,
2.060292E-05,1.922320E-05,1.793588E-05,1.673477E-05,1.561409E-05,1.456846E-05,
1.359286E-05,1.268258E-05,1.183327E-05,1.104083E-05,1.030146E-05,9.611601E-06,
8.967941E-06,8.367385E-06,7.807046E-06,7.284232E-06,6.796428E-06,6.341292E-06,
5.916635E-06,5.520415E-06,5.150730E-06,4.805801E-06,4.483971E-06,4.183692E-06,
3.903523E-06,3.642116E-06,3.398214E-06,3.170646E-06,2.958317E-06,2.760208E-06,
2.575365E-06,2.402900E-06,2.241985E-06,2.091846E-06,1.951762E-06,1.821058E-06,
1.699107E-06,1.585323E-06,1.479159E-06,1.380104E-06,1.287682E-06,1.201450E-06,
1.120993E-06,1.045923E-06,9.758808E-07,9.105289E-07,8.495535E-07,7.926615E-07,
7.395793E-07,6.900519E-07,6.438412E-07,6.007251E-07,5.604963E-07,5.229616E-07,
4.879404E-07,4.552645E-07,4.247768E-07,3.963307E-07,3.697897E-07,3.450260E-07,
3.219206E-07,3.003625E-07,2.802482E-07,2.614808E-07,2.439702E-07,2.276322E-07,
2.123884E-07,1.981654E-07,1.848948E-07,1.725130E-07,1.609603E-07,1.501813E-07,
1.401241E-07,1.307404E-07,1.219851E-07,1.138161E-07,1.061942E-07,9.908269E-08,
9.244741E-08,8.625649E-08,8.048015E-08,7.509063E-08,7.006204E-08,6.537019E-08,
6.099255E-08,5.690806E-08,5.309710E-08,4.954134E-08,4.622371E-08,4.312824E-08,
4.024007E-08,3.754532E-08,3.503102E-08,3.268510E-08,3.049627E-08,2.845403E-08,
2.654855E-08,2.477067E-08,2.311185E-08,2.156412E-08,2.012004E-08,1.877266E-08,
1.751551E-08,1.634255E-08,1.524814E-08,1.422702E-08,1.327427E-08,1.238534E-08,
1.155593E-08,1.078206E-08,1.006002E-08,9.386329E-09,8.757755E-09,8.171274E-09,
7.624068E-09,7.113507E-09,6.637137E-09,6.192668E-09,5.777963E-09,5.391030E-09,
5.030009E-09,4.693165E-09,4.378877E-09,4.085637E-09,3.812034E-09,3.556754E-09,
3.318569E-09,3.096334E-09,2.888982E-09,2.695515E-09,2.515005E-09,2.346582E-09,
2.189439E-09,2.042819E-09,1.906017E-09,1.778377E-09,1.659284E-09,1.548167E-09,
1.444491E-09,1.347758E-09,1.257502E-09,1.173291E-09,1.094719E-09,1.021409E-09,
9.530086E-10,8.891884E-10,8.296421E-10,7.740835E-10,7.222454E-10,6.738788E-10,
6.287512E-10,5.866456E-10,5.473597E-10,5.107046E-10,4.765043E-10,4.445942E-10,
4.148211E-10,3.870417E-10,3.611227E-10,3.369394E-10,3.143756E-10,2.933228E-10,
2.736798E-10,2.553523E-10,2.382521E-10,2.222971E-10,2.074105E-10,1.935209E-10,
1.805614E-10,1.684697E-10,1.571878E-10,1.466614E-10,1.368399E-10,1.276762E-10,
1.191261E-10,1.111486E-10,1.037053E-10,9.676043E-11,9.028068E-11,8.423485E-11,
7.859390E-11,7.333070E-11,6.841996E-11,6.383808E-11,5.956303E-11,5.557428E-11,
5.185263E-11,4.838022E-11,4.514034E-11,4.211743E-11,3.929695E-11,3.666535E-11,
3.420998E-11,3.191904E-11,2.978152E-11,2.778714E-11,2.592632E-11,2.419011E-11,
2.257017E-11,2.105871E-11,1.964847E-11,1.833267E-11,1.710499E-11,1.595952E-11],
[1.172251E-01,1.132320E-01,1.093749E-01,1.056492E-01,1.020504E-01,9.857420E-02,
9.521640E-02,9.197298E-02,8.884005E-02,8.581383E-02,8.289069E-02,8.006713E-02,
7.733975E-02,7.470528E-02,7.216054E-02,6.970249E-02,6.732817E-02,6.503472E-02,
6.281940E-02,6.067954E-02,5.861257E-02,5.661601E-02,5.468746E-02,5.282461E-02,
5.102521E-02,4.928710E-02,4.760820E-02,4.598649E-02,4.442002E-02,4.290691E-02,
4.144535E-02,4.003357E-02,3.866988E-02,3.735264E-02,3.608027E-02,3.485124E-02,
3.366408E-02,3.251736E-02,3.140970E-02,3.033977E-02,2.930629E-02,2.830801E-02,
2.734373E-02,2.641230E-02,2.551260E-02,2.464355E-02,2.380410E-02,2.299325E-02,
2.221001E-02,2.145346E-02,2.072267E-02,2.001678E-02,1.933494E-02,1.867632E-02,
1.804014E-02,1.742562E-02,1.683204E-02,1.625868E-02,1.570485E-02,1.516989E-02,
1.465314E-02,1.415400E-02,1.367187E-02,1.320615E-02,1.275630E-02,1.232178E-02,
1.190205E-02,1.149662E-02,1.110501E-02,1.072673E-02,1.036134E-02,1.000839E-02,
9.667469E-03,9.338160E-03,9.020068E-03,8.712811E-03,8.416021E-03,8.129340E-03,
7.852425E-03,7.584943E-03,7.326572E-03,7.077002E-03,6.835933E-03,6.603076E-03,
6.378151E-03,6.160888E-03,5.951025E-03,5.748312E-03,5.552503E-03,5.363364E-03,
5.180668E-03,5.004196E-03,4.833735E-03,4.669080E-03,4.510034E-03,4.356406E-03,
4.208010E-03,4.064670E-03,3.926212E-03,3.792471E-03,3.663286E-03,3.538501E-03,
3.417966E-03,3.301538E-03,3.189075E-03,3.080444E-03,2.975513E-03,2.874156E-03,
2.776251E-03,2.681682E-03,2.590334E-03,2.502098E-03,2.416867E-03,2.334540E-03,
2.255017E-03,2.178203E-03,2.104005E-03,2.032335E-03,1.963106E-03,1.896236E-03,
1.831643E-03,1.769250E-03,1.708983E-03,1.650769E-03,1.594538E-03,1.540222E-03,
1.487756E-03,1.437078E-03,1.388126E-03,1.340841E-03,1.295167E-03,1.251049E-03,
1.208434E-03,1.167270E-03,1.127508E-03,1.089101E-03,1.052003E-03,1.016168E-03,
9.815531E-04,9.481178E-04,9.158214E-04,8.846252E-04,8.544916E-04,8.253845E-04,
7.972689E-04,7.701110E-04,7.438782E-04,7.185389E-04,6.940629E-04,6.704205E-04,
6.475836E-04,6.255245E-04,6.042168E-04,5.836350E-04,5.637542E-04,5.445507E-04,
5.260013E-04,5.080838E-04,4.907766E-04,4.740589E-04,4.579107E-04,4.423126E-04,
4.272458E-04,4.126923E-04,3.986344E-04,3.850555E-04,3.719391E-04,3.592695E-04,
3.470314E-04,3.352103E-04,3.237918E-04,3.127622E-04,3.021084E-04,2.918175E-04,
2.818771E-04,2.722753E-04,2.630006E-04,2.540419E-04,2.453883E-04,2.370295E-04,
2.289554E-04,2.211563E-04,2.136229E-04,2.063461E-04,1.993172E-04,1.925277E-04,
1.859695E-04,1.796347E-04,1.735157E-04,1.676051E-04,1.618959E-04,1.563811E-04,
1.510542E-04,1.459087E-04,1.409386E-04,1.361377E-04,1.315003E-04,1.270209E-04,
1.226941E-04,1.185147E-04,1.144777E-04,1.105782E-04,1.068115E-04,1.031731E-04,
9.965861E-05,9.626387E-05,9.298477E-05,8.981737E-05,8.675786E-05,8.380257E-05,
8.094794E-05,7.819056E-05,7.552710E-05,7.295437E-05,7.046928E-05,6.806884E-05,
6.575016E-05,6.351047E-05,6.134707E-05,5.925736E-05,5.723884E-05,5.528908E-05,
5.340573E-05,5.158653E-05,4.982930E-05,4.813194E-05,4.649239E-05,4.490868E-05,
4.337893E-05,4.190128E-05,4.047397E-05,3.909528E-05,3.776355E-05,3.647719E-05,
3.523464E-05,3.403442E-05,3.287508E-05,3.175523E-05,3.067354E-05,2.962868E-05,
2.861942E-05,2.764454E-05,2.670286E-05,2.579327E-05,2.491465E-05,2.406597E-05,
2.324619E-05,2.245434E-05,2.168946E-05,2.095064E-05,2.023699E-05,1.954764E-05,
1.888178E-05,1.823859E-05,1.761732E-05,1.701721E-05,1.643754E-05,1.587762E-05,
1.533677E-05,1.481434E-05,1.430971E-05,1.382227E-05,1.335143E-05,1.289663E-05,
1.245733E-05,1.203298E-05,1.162310E-05,1.122717E-05,1.084473E-05,1.047532E-05,
1.011849E-05,9.773820E-06,9.440888E-06,9.119297E-06,8.808660E-06,8.508605E-06,
8.218770E-06,7.938809E-06,7.668384E-06,7.407170E-06,7.154855E-06,6.911134E-06,
6.675716E-06,6.448316E-06,6.228663E-06,6.016492E-06,5.811548E-06,5.613585E-06,
5.422366E-06,5.237660E-06,5.059247E-06,4.886910E-06,4.720444E-06,4.559648E-06,
4.404330E-06,4.254302E-06,4.109385E-06,3.969404E-06,3.834192E-06,3.703585E-06,
3.577428E-06,3.455567E-06,3.337858E-06,3.224158E-06,3.114332E-06,3.008246E-06,
2.905774E-06,2.806793E-06,2.711183E-06,2.618830E-06,2.529623E-06,2.443455E-06,
2.360222E-06,2.279824E-06,2.202165E-06,2.127151E-06,2.054693E-06,1.984702E-06,
1.917096E-06,1.851793E-06,1.788714E-06,1.727784E-06,1.668929E-06,1.612079E-06,
1.557166E-06,1.504123E-06,1.452887E-06,1.403396E-06,1.355592E-06,1.309415E-06,
1.264812E-06,1.221728E-06,1.180111E-06,1.139912E-06,1.101082E-06,1.063576E-06,
1.027346E-06,9.923511E-07,9.585480E-07,9.258963E-07,8.943569E-07,8.638918E-07,
8.344645E-07,8.060396E-07,7.785829E-07,7.520615E-07,7.264435E-07,7.016982E-07,
6.777958E-07,6.547076E-07,6.324058E-07,6.108638E-07,5.900555E-07,5.699560E-07,
5.505412E-07,5.317878E-07,5.136731E-07,4.961755E-07,4.792740E-07,4.629482E-07,
4.471784E-07,4.319459E-07,4.172322E-07,4.030198E-07,3.892914E-07,3.760307E-07]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.app_rate_conv1 = 11.2
ted_empty.h2o_depth_puddles = 1.3
ted_empty.soil_depth = 2.6
ted_empty.soil_porosity = 0.4339623
ted_empty.soil_bulk_density = 1.5
ted_empty.h2o_depth_soil = 0.0
ted_empty.soil_foc = 0.015
# internally specified variable
water_type = ['puddles', 'pore_water', 'puddles']
# input variables that change per simulation
ted_empty.aerobic_soil_meta_hlife = pd.Series([15., 10., 20.], dtype='float')
ted_empty.koc = pd.Series([1500., 1000., 2000.], dtype='float')
            ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25])
"""
SCRIPT TO CONVERT AND WRITE CHARMM RTF AND PRM FILES
FROM BOSS ZMATRIX
Created on Mon Feb 15 15:40:05 2016
@author: <NAME> <EMAIL>
@author: <NAME>
Usage: python OPM_Routines.py -z phenol.z -r PHN
REQUIREMENTS:
BOSS (need to set BOSSdir in bashrc and cshrc)
Preferably Anaconda python with following modules
pandas
argparse
numpy
"""
from LigParGen.BOSSReader import bossPdbAtom2Element,bossElement2Mass,ucomb
import pickle
import pandas as pd
import numpy as np
def retDihedImp(df):
odihed = []
if np.sum([df['V' + str(pot)] for pot in range(1, 5)]) != 0.0:
for pot in range(1, 5):
if (df['V' + str(pot)] != 0.0):
odihed.append('%s %4.5f %d %4.5f \n' % (df['NAME'].replace(
"-", " "), df['V' + str(pot)], pot, 180.00 * abs(pot % 2 - 1)))
else:
pot = 2
odihed.append('%s %4.5f %d %4.5f \n' % (df['NAME'].replace(
"-", " "), df['V' + str(pot)], pot, 180.00 * abs(pot % 2 - 1)))
return (odihed)
def retDihed(df):
odihed = []
for pot in range(1, 5):
odihed.append('%s %4.5f %d %4.5f \n' % (df['NAME'].replace(
"-", " "), df['V' + str(pot)], pot, 180.00 * abs(pot % 2 - 1)))
return (odihed)
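# Illustrative input for the two writers above (hypothetical values): each torsion row carries
# the four OPLS Fourier coefficients V1..V4 plus a NAME string of four CHARMM atom types, e.g.
#   retDihed({'NAME': 'CT-CT-CT-HC', 'V1': 0.0, 'V2': 0.0, 'V3': 0.3, 'V4': 0.0})
# emits one 'CT CT CT HC <K> <n> <phase>' line per periodicity n = 1..4, with phase 0 for odd n
# and 180 for even n; retDihedImp writes only the non-zero terms, or a single n = 2 line when
# the coefficients sum to zero.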
def Boss2CharmmRTF(num2typ2symb, Qs, resid, bnd_df, imps):
charges = [float(Qs[i][1]) for i in range(len(Qs))]
rtf = open(resid + '.rtf', 'w+')
rtf.write('! LigParGen generated RFT file for NAMD/CHARMM \n')
Mass = ['MASS %d %s %3.4f %s \n' % ((i + 1), num2typ2symb[i][2], bossElement2Mass(
bossPdbAtom2Element(num2typ2symb[i][0])), bossPdbAtom2Element(num2typ2symb[i][0])) for i in range(len(Qs))]
for i in range(len(Mass)):
rtf.write('%s' % Mass[i])
rtf.write('AUTO ANGLES DIHE \n')
rtf.write('RESI %5s %3.3f \n' % (resid, sum(charges)))
for i in range(len(Qs)):
rtf.write('ATOM %s %s %s \n' % (num2typ2symb[i][0], bossPdbAtom2Element(
num2typ2symb[i][0]) + num2typ2symb[i][1][-3:], Qs[i][1]))
for (x, y) in zip(bnd_df.cl1, bnd_df.cl2):
rtf.write('BOND %s %s \n' % (num2typ2symb[x][0], num2typ2symb[y][0]))
for i in imps:
rtf.write('IMPR %s \n' % (i.replace("-", " ")))
rtf.write('PATCH FIRST NONE LAST NONE \n')
rtf.write('END \n')
rtf.close()
return None
def Boss2CharmmPRM(resid, num2typ2symb, Qs, bnd_df, ang_df, tor_df):
#### COLLECTING NONBONDING PART #######
prm = open(resid + '.prm', 'w+')
prm.write('! LigParGen generated PRM file for NAMD/CHARMM \n')
# bnd_df = bnd_df.drop_duplicates(['TIJ'])
prm.write('\nBOND \n')
for i in bnd_df.index:
prm.write('%s %s %4.3f %4.3f \n' % (num2typ2symb[bnd_df.cl1[i]][
2], num2typ2symb[bnd_df.cl2[i]][2], bnd_df.KIJ[i], bnd_df.RIJ[i]))
# ang_df = ang_df.drop_duplicates(['TY', 'R', 'K'])
prm.write('\nANGLE \n')
for i in ang_df.index:
prm.write('%s %s %s %4.3f %4.3f \n' % (num2typ2symb[ang_df.cl1[i]][2], num2typ2symb[
ang_df.cl2[i]][2], num2typ2symb[ang_df.cl3[i]][2], ang_df.K[i], ang_df.R[i]))
prm.write('\nDIHEDRAL \n')
if len(tor_df.index) > 0:
tor_df = tor_df.drop_duplicates(['NAME', 'TY'])
pro_df = tor_df[tor_df.TY == 'Proper']
for i in list(pro_df.index):
            ndf = pro_df.loc[i]  # .loc replaces the removed DataFrame.ix indexer
pro_out = retDihed(ndf.to_dict())
for i in range(4):
prm.write('%s' % pro_out[i])
prm.write(
'X X X X 0.00000 1 0.000000 ! WILD CARD FOR MISSING TORSION PARAMETERS\n')
prm.write('\nIMPROPER \n')
imp_df = tor_df[tor_df.TY == 'Improper']
for i in list(imp_df.index):
            ndf = tor_df.loc[i]
imp_out = retDihedImp(ndf.to_dict())
for i in range(len(imp_out)):
prm.write('%s' % imp_out[i])
prm.write(
'X X X X 0.00000 1 0.000000 ! WILD CARD FOR MISSING IMPROPER PARAMETERS \n')
prm.write(
'\nNONBONDED nbxmod 5 atom cdiel switch vatom vdistance vswitch - \ncutnb 14.0 ctofnb 12.0 ctonnb 11.5 eps 1.0 e14fac 0.5 geom\n')
Qlines = ['%s 0.00 %3.6f %3.6f 0.00 %3.6f %3.6f \n' %
(num2typ2symb[i][2], float(Qs[i][3]) * -1.00, float(Qs[i][2]) * 0.561231, float(Qs[i][3]) * -0.50,
float(Qs[i][2]) * 0.561231) for i in range(len(Qs))]
# uniqQs = list(set(Qlines))
for i in range(len(Qlines)):
prm.write('%s' % Qlines[i])
prm.close()
return None
def Boss2CharmmTorsion(bnd_df, num2opls, st_no, molecule_data, num2typ2symb):
# print num2opls
dhd = []
for line in molecule_data.MolData['TORSIONS']:
dt = [float(l) for l in line]
dhd.append(dt)
dhd = np.array(dhd)
    dhd = dhd  # values stay in kcal/mol (no kcal-to-kJ conversion is applied here)
dhd = dhd / 2.0 # Komm = Vopls/2
dhd_df = pd.DataFrame(dhd, columns=['V1', 'V2', 'V3', 'V4'])
ats = []
for line in molecule_data.MolData['ATOMS'][3:]:
dt = [line.split()[0], line.split()[4],
line.split()[6], line.split()[8]]
dt = [int(d) for d in dt]
ats.append(dt)
for line in molecule_data.MolData['ADD_DIHED']:
dt = [int(l) for l in line]
ats.append(dt)
assert len(ats) == len(
dhd), 'Number of Dihedral angles in Zmatrix and Out file dont match'
ats = np.array(ats) - st_no
for i in range(len(ats)):
for j in range(len(ats[0])):
if ats[i][j] < 0:
ats[i][j] = 0
at_df = pd.DataFrame(ats, columns=['I', 'J', 'K', 'L'])
    final_df = pd.concat([dhd_df, at_df], axis=1).reindex(at_df.index)  # join_axes was removed from pandas.concat
bndlist = list(bnd_df.UR) + (list(bnd_df.UR))
final_df['TY'] = ['Proper' if ucomb(list([final_df.I[n], final_df.J[n], final_df.K[
n], final_df.L[n]]), bndlist) == 3 else 'Improper' for n in range(len(final_df.I))]
# final_df['SumV'] = np.abs(
# final_df.V1) + np.abs(final_df.V2) + np.abs(final_df.V3) + np.abs(final_df.V4)
# final_df = final_df[final_df['SumV'] != 0.00]
final_df['TI'] = [num2typ2symb[j][2] for j in final_df.I]
final_df['TJ'] = [num2typ2symb[j][2] for j in final_df.J]
final_df['TK'] = [num2typ2symb[j][2] for j in final_df.K]
final_df['TL'] = [num2typ2symb[j][2] for j in final_df.L]
final_df['SYMB'] = ['-'.join([num2typ2symb[final_df.I[i]][0], num2typ2symb[final_df.J[i]][
0], num2typ2symb[final_df.K[i]][0], num2typ2symb[final_df.L[i]][0]]) for i in final_df.index]
if len(final_df.index) > 0:
final_df['NAME'] = final_df.TI + '-' + final_df.TJ + \
'-' + final_df.TK + '-' + final_df.TL
return final_df
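# Note on the Proper/Improper split above: a torsion I-J-K-L is tagged 'Proper' when
# ucomb(...) == 3, which (assuming ucomb counts how many consecutive atom pairs of the quartet
# appear in the bond list) means all three of I-J, J-K and K-L are real bonds; any other
# combination is written out as an improper.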
def boss2CharmmBond(molecule_data, st_no):
bdat = molecule_data.MolData['BONDS']
bdat['cl1'] = [x - st_no if not x - st_no < 0 else 0 for x in bdat['cl1']]
bdat['cl2'] = [x - st_no if not x - st_no < 0 else 0 for x in bdat['cl2']]
    bnd_df = pd.DataFrame(bdat)
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from datetime import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
import matplotlib
import numpy
from numpy import concatenate
# date-time parsing function for loading the dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag+1)]
columns.append(df)
df = concat(columns, axis=1)
df = df.drop(0)
return df
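# Example of the framing above (illustrative): with data = [x1, x2, x3] and lag=1 the shifted
# copy is concatenated next to the original and the first (NaN) row is dropped, leaving the
# rows (x1, x2) and (x2, x3), i.e. the value at t-1 paired with the value at t.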
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
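# Worked example for the two helpers above (illustrative): difference([10, 12, 15]) returns the
# Series [2, 3]; inverse_difference recovers a level from a predicted change, e.g.
# inverse_difference([10, 12, 15], yhat=2, interval=1) -> 15 + 2 = 17.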
# scale train and test data to [-1, 1]
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# transform test
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
return scaler, train_scaled, test_scaled
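# Design note: the MinMaxScaler is fitted on the training split only, so no information from
# the test period leaks into the scaling; both splits are then mapped into [-1, 1], a range
# that suits the LSTM's default tanh activation.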
# inverse scaling for a forecasted value
def invert_scale(scaler, X, yhat):
new_row = [x for x in X] + [yhat]
array = numpy.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = Sequential()
model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
model.reset_states()
return model
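# Design note: the network is stateful with a fixed batch_input_shape, so Keras does not reset
# the hidden state between batches; the manual epoch loop with shuffle=False and reset_states()
# keeps state across the whole sequence within an epoch and clears it between epochs. Because
# the batch size is baked into the model, forecasts must use the same batch_size (here 1).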
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
X = X.reshape(1, 1, len(X))
yhat = model.predict(X, batch_size=batch_size)
return yhat[0,0]
# run a repeated experiment
def experiment(repeats, series):
# transform data to be stationary
raw_values = series.values
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:-12], supervised_values[-12:]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
# run experiment
error_scores = list()
for r in range(repeats):
# fit the base model
lstm_model = fit_lstm(train_scaled, 1, 500, 1)
# forecast test dataset
predictions = list()
for i in range(len(test_scaled)):
# predict
X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
yhat = forecast_lstm(lstm_model, 1, X)
# invert scaling
yhat = invert_scale(scaler, X, yhat)
# invert differencing
yhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)
# store forecast
predictions.append(yhat)
# report performance
rmse = sqrt(mean_squared_error(raw_values[-12:], predictions))
print('%d) Test RMSE: %.3f' % (r+1, rmse))
error_scores.append(rmse)
return error_scores
# execute the experiment
def run():
# load dataset
series = read_csv('shampoo-sales.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# experiment
repeats = 10
	results = DataFrame()
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
#from sqlalchemy import create_engine
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])  # use row i to mirror the price delta above
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
        x = spq['Product_Price']
        y = spq['Product_Qty']
        num_bins = 5
        # np.histogram yields the same bin edges plt.hist would, without needing a display;
        # with the plt.hist calls commented out, pint/dint stayed empty and the heatmap came back blank
        _, pint = np.histogram(x, num_bins)
        _, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
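        # Fitted demand model (as specified in the smf.ols formula above):
        #   Qty ~ intercept + diffpriceprodvscomp_param * (own price - competitor price)
        #         + promo1_param * Promo1 + promo2_param * Promo2 + week_param * log(week)
        # (Modeldata['Week'] holds the log of the week number); these coefficients are reused
        # below to build the per-week price/demand surface.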
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
                function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
                (diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
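        # Demand model used for the optimisation: demand_t = a + b*(p_t - comp_t) + d*t
        # + pr1*promo1 + pr2*promo2, with a, b, d, pr1 and pr2 taken from the regression fitted
        # in the elasticity step; note that promo2 and pr1/pr2 are resolved from the enclosing
        # and module scopes, not from the unused promo2_param argument.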
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
            """ Non-negative prices: another inequality constraint, p_t >= 0. """
return p_t
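        # SciPy treats 'ineq' constraints as fun(x) >= 0, so constraint_1 caps total demand at
        # the available inventory s_0 and constraint_2 keeps every weekly price non-negative.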
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
conn.close()
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
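# Note: the INSERT above builds its SQL string by concatenating form values. pymysql's
# cursor.execute() also accepts parameterized queries (the style already used by the
# forecast routes below); a minimal sketch for two of the columns:
#   cur.execute("INSERT INTO `scenario`(scenario, description) VALUES (%s, %s)",
#               (scenario_clean, description_clean))
# which leaves quoting/escaping of user-supplied values to the driver.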
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('sub_grouping_rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to insufficient load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
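# The two loops above compute per-wagon utilisation: delivered quantity as a percentage of
# the load denominator 205000, and summed width as a percentage of the width denominator
# 370 (the capacity constants hard-coded above).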
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1).reindex(dear.index)
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(0)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
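# r66/r77/r88 hold, per product group, the count of unallocated batches, the total batch
# count and the group labels (truncated to the first 73 groups); they are passed to the
# dashboard template as bar8/bar7/xname.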
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance between customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
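# Haversine great-circle distance (inputs converted to radians above):
#   a = sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2)
#   c = 2*atan2(sqrt(a), sqrt(1-a))
#   distance = R*c, with R ~ 6373 km
# The distance is then scaled by the per-unit transport cost.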
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#returns the cost table for each customer/factory pair
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + 5000000*cap_slack[cust] for cust in Demand)
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
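# Taken together this is a capacitated facility-location MILP:
#   minimise  sum_ij c_ij*x_ij + sum_i f_i*y_i + M*sum_j s_j
#   s.t.      sum_i x_ij + s_j == d_j        (demand of every customer j is met or slacked)
#             sum_j x_ij <= cap_i * y_i      (factory i ships only if it is opened)
# with x = production (integer), y = factory_status (binary), s = cap_slack and M = 5000000.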
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
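# The quarterly branch above maps every month to the first month of its quarter
# (01/04/07/10) and sums the numeric columns per quarter. A roughly equivalent pandas
# sketch, assuming 'Date' is first parsed with pd.to_datetime, would be:
#   demandforecastinputdata.resample('QS', on='Date').sum()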
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weakly': ##weakly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
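# Error metrics used to score each forecasting model below:
#   ME   = mean(y_true - y_pred)              (bias)
#   MAE  = mean(|y_true - y_pred|)
#   MAPE = 100 * mean(|y_true - y_pred| / y_true)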
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
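# mavg() fits an ARIMA(0,0,1) model, i.e. a first-order moving-average (MA(1)) process,
# to each demand column, forecasts from the last training date up to the 'till' date,
# and records the period-over-period percentage change of TotalDemand in ratio_inc.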
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
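# regression() regresses each demand column on a simple time index t = 0..n-1 with
# sklearn's LinearRegression, scores the fit on the 30% hold-out set V (ME/MAE/MAPE),
# and extrapolates the trend noofterms periods beyond the training data.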
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
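# Simple exponential smoothing: s_0 = y_0 and s_t = alpha*y_t + (1-alpha)*s_{t-1};
# future periods are then rolled forward recursively from the last actual/smoothed
# pair, here with alpha = 0.5.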
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2,0] #TotalDemand column
Eb=Exponentials.iloc[j-1,0]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.period_range(start=start_index1, periods=lengthofprd+1, freq='A')
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('mdashboard.html',mon=value,qulist=mqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#quarterly
@app.route('/qoutgraph', methods = ['GET','POST'])
def qoutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputq` GROUP BY `Model`")
sfile=cur.fetchall()
global qst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
qst=qlst.values
con.close()
return render_template('qdashboard.html',qulist=qst)
@app.route('/qoutgraph1', methods = ['GET', 'POST'])
def qoutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputq` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('qdashboard.html',mon=value,qulist=qst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
@app.route("/yearlysimulation",methods = ['GET','POST'])
def yearlysimulation():
if request.method == 'POST':
gdp=0
pi=0
ms=0
adv=0
gdp_dis=request.form.get('gdp_dis')
pi_dis=request.form.get('pi_dis')
ms_dis=request.form.get('ms_dis')
adv_dis=request.form.get('adv_dis')
min=request.form.get('min')
max=request.form.get('max')
mue=request.form.get('mue')
sig=request.form.get('sig')
cval=request.form.get('cval')
min1=request.form.get('min1')
max1=request.form.get('max1')
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
cval1=request.form.get('cval1')
min2=request.form.get('min2')
max2=request.form.get('max2')
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
cval2=request.form.get('cval2')
min3=request.form.get('min3')
max3=request.form.get('max3')
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
cval3=request.form.get('cval3')
itr= int(request.form.get('itr'))
frm = request.form.get('from')
sfrm=int(frm[:4])
to = request.form.get('to')
sto=int(to[:4])
kwargs={}
atrtable=[]
if request.form.get('gdp'):
gdp=1
atrtable.append('Gdp')
if gdp_dis == 'gdp_dis1':
min=request.form.get('min')
max=request.form.get('max')
kwargs['Gdp_dis']='Uniform'
kwargs['gdpvalues']=[min,max]
if gdp_dis == 'gdp_dis2':
mue=request.form.get('mue')
sig=request.form.get('sig')
kwargs['Gdp_dis']='Normal'
kwargs['gdpvalues']=[mue,sig]
if gdp_dis == 'gdp_dis3':
kwargs['Gdp_dis']='Random'
pass
if gdp_dis == 'gdp_dis4':
cval=request.form.get('cval')
kwargs['Gdp_dis']='Constant'
kwargs['gdpvalues']=[cval]
if request.form.get('pi'):
pi=1
atrtable.append('Pi')
if pi_dis == 'pi_dis1':
min1=request.form.get('min1')
max1=request.form.get('max1')
kwargs['Pi_dis']='Uniform'
kwargs['pivalues']=[min1,max1]
if pi_dis == 'pi_dis2':
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
kwargs['Pi_dis']='Normal'
kwargs['pivalues']=[mue1,sig1]
if pi_dis == 'pi_dis3':
kwargs['Pi_dis']='Random'
pass
if pi_dis == 'pi_dis4':
cval1=request.form.get('cval1')
kwargs['Pi_dis']='Constant'
kwargs['pivalues']=[cval1]
if request.form.get('ms'):
ms=1
atrtable.append('Ms')
if ms_dis == 'ms_dis1':
min=request.form.get('min2')
max=request.form.get('max2')
kwargs['Ms_dis']='Uniform'
kwargs['msvalues']=[min2,max2]
if ms_dis == 'ms_dis2':
mue=request.form.get('mue2')
sig=request.form.get('sig2')
kwargs['Ms_dis']='Normal'
kwargs['msvalues']=[mue2,sig2]
if ms_dis == 'ms_dis3':
kwargs['Ms_dis']='Random'
pass
if ms_dis == 'ms_dis4':
cval=request.form.get('cval2')
kwargs['Ms_dis']='Constant'
kwargs['msvalues']=[cval2]
if request.form.get('adv'):
adv=1
atrtable.append('Adv')
if adv_dis == 'adv_dis1':
min=request.form.get('min3')
max=request.form.get('max3')
kwargs['Adv_dis']='Uniform'
kwargs['advvalues']=[min3,max3]
if adv_dis == 'adv_dis2':
mue=request.form.get('mue3')
sig=request.form.get('sig3')
kwargs['Adv_dis']='Normal'
kwargs['advvalues']=[mue3,sig3]
if adv_dis == 'adv_dis3':
kwargs['Adv_dis']='Random'
pass
if adv_dis == 'adv_dis4':
cval=request.form.get('cval3')
kwargs['Adv_dis']='Constant'
kwargs['advvalues']=[cval3]
#print(kwargs)
#print(atrtable)
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `stech` (`gdp` VARCHAR(1),`pi` VARCHAR(1), `ms` VARCHAR(1),`adv` VARCHAR(1),`itr` VARCHAR(5),`sfrm` VARCHAR(10),`sto` VARCHAR(10))")
cur.execute("DELETE FROM `stech`")
con.commit()
cur.execute("INSERT INTO `stech` VALUES('"+str(gdp)+"','"+str(pi)+"','"+str(ms)+"','"+str(adv)+"','"+str(itr)+"','"+str(sfrm)+"','"+str(sto)+"')")
con.commit()
data = pd.DataFrame(Yeardata)
#print(data)
data.columns
xvar=pd.concat([data['GDP'],data['Pi_Exports'],data['Market_Share'],data['Advertisement_Expense']],axis=1)
yvar=pd.DataFrame(data['TotalDemand'])
regr = linear_model.LinearRegression()
regr.fit(xvar,yvar)
# predict=regr.predict(xvar)
#Error Measures
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
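# Note: this MAPE divides by the predictions (y_pred); the textbook definition divides by
# the actuals (y_true). For example, MAPE([100,200],[110,190]) here is
# mean(|100-110|/110, |200-190|/190)*100, i.e. about 7.2%.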
def sim(iteration,data,startyear,endyear,atrtable,Gdp_dis=None,gdpvalues=None,Adv_dis=None,advvalues=None,Ms_dis=None,msvalues=None,Pi_dis=None,pivalues=None):
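# Monte Carlo simulation: for each of `iteration` runs, yearly values of the selected
# drivers (Gdp, Pi, Adv, Ms) are drawn from the chosen distribution (Uniform/Normal/Constant,
# or a default random range), demand is predicted with the regression fitted above on the
# historical drivers, and ME/MAE/MAPE are computed against the years for which actual
# demand exists.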
preddata=pd.DataFrame()
simdata=pd.DataFrame()
#Errordf=pd.DataFrame()
Errormsr=pd.DataFrame()
date=pd.date_range(start=pd.Timestamp(startyear, 1, 1), end=pd.Timestamp(endyear+1, 1, 1),freq='A')
date=pd.DataFrame(date.strftime("%Y"))
#Fetch the actual demand values for the years that are present in the historical data
m=len(date)
Arrayofdates=data['Date']
vari=[]
for var in Arrayofdates:
vari.append(var[:4])
Arrayofdates=pd.DataFrame(vari)
dates=[]
Fetchdata=[]
for i in range(0,m):
years=date.loc[i]
for j in range(0,len(Arrayofdates)):
if int(Arrayofdates.loc[j])==int(years):
da=data['TotalDemand'].loc[j]
Fetchdata.append(da) #Gives Data In the Given Range That we have actually
dates.extend(years) #Gives Years that we have data
for i in range(0,iteration):
df=pd.DataFrame()
#for The Gdp
S='flag'
for row in atrtable:
if row=='Gdp':
S='Gdp'
if S=='Gdp':
if Gdp_dis=='Normal':
gdpdf=pd.DataFrame(np.random.normal(float(gdpvalues[0]),float(gdpvalues[1]),m))
elif Gdp_dis=='Uniform':
gdpdf=pd.DataFrame(np.random.uniform(float(gdpvalues[0]),float(gdpvalues[1]),m))
elif Gdp_dis=='Constant':
gdpdf=pd.DataFrame(np.random.choice([float(gdpvalues[0])],m))
else:
gdpdf=pd.DataFrame(np.random.uniform(-4,4,m))
else:
gdpdf=pd.DataFrame(np.random.uniform(0,0,m))
# for the pi dataframe
O='flag'
for row in atrtable:
if row=='Pi':
O='Pi'
if O=='Pi':
if Pi_dis=='Normal':
pidf=pd.DataFrame(np.random.normal(float(pivalues[0]),float(pivalues[1]),m))
elif Pi_dis=='Uniform':
pidf=pd.DataFrame(np.random.uniform(float(pivalues[0]),float(pivalues[1]),m))
elif Pi_dis=='Constant':
pidf=pd.DataFrame(np.random.choice([float(pivalues[0])],m))
else:
pidf=pd.DataFrame(np.random.randint(80,121,m))
else:
pidf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Adv Dataframe
N='flag'
for row in atrtable:
if row=='Adv':
N='Adv'
if N=='Adv':
if Adv_dis=='Normal':
advdf=pd.DataFrame(np.random.normal(float(advvalues[0]),float(advvalues[1]),m))
elif Adv_dis=='Uniform':
advdf=pd.DataFrame(np.random.uniform(float(advvalues[0]),float(advvalues[1]),m))
elif Adv_dis=='Constant':
advdf=pd.DataFrame(np.random.choice([float(advvalues[0])],m))
else:
advdf=pd.DataFrame(np.random.randint(500000,1000001,m))
else:
advdf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Ms dataframe
U='flag'
for row in atrtable:
if row=='Ms':
U='Ms'
if U=='Ms':
if Ms_dis=='Normal':
msdf=pd.DataFrame(np.random.normal(float(msvalues[0]),float(msvalues[1]),m))
elif Ms_dis=='Uniform':
msdf=pd.DataFrame(np.random.uniform(float(msvalues[0]),float(msvalues[1]),m))
elif Ms_dis=='Constant':
msdf=pd.DataFrame(np.random.choice([float(msvalues[0])],m))
else:
msdf=pd.DataFrame(np.random.uniform(0.1,0.5,m))
else:
msdf=pd.DataFrame(np.random.uniform(0,0,m))
#Concatenating All the dataframes for Simulation Data
df=pd.concat([gdpdf,pidf,msdf,advdf],axis=1)
simid=pd.DataFrame(np.random.choice([i+1],m))
dd=pd.concat([simid,gdpdf,pidf,advdf,msdf],axis=1)
dd.columns=['Year','Gdp','Pi','Adv','Ms']
simdata=pd.concat([simdata,dd],axis=0)
#Predict demand for the simulated inputs with the regression fitted on the historical data and store it in preddata
dfs=pd.DataFrame(regr.predict(df))
datatable=pd.concat([simid,date,dfs],axis=1)
datatable.columns=['simid','Year','Total_Demand(Tonnes)']
preddata=pd.concat([datatable,preddata],axis=0)
datas=list()
#Collect the predicted values for the years where actual demand exists
# print(datatable)
for row in dates:
# print(dates)
datas.extend(datatable.loc[datatable['Year'] ==row, 'Total_Demand(Tonnes)'])
kkk=pd.DataFrame(datas)
me=ME(Fetchdata,kkk)
mae=MAE(Fetchdata,kkk)
mape=MAPE(Fetchdata,kkk)
dfe=pd.DataFrame([me,mae,mape],index=['ME','MAE','MAPE']).T
Errormsr=pd.concat([Errormsr,dfe],axis=0).reset_index(drop=True)
return preddata,simdata,Errormsr
preddata,simdata,Errormsr=sim(itr,data,sfrm,sto,atrtable,**kwargs)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
preddata.to_sql(con=engine, name='predicteddata',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
simdata.to_sql(con=engine2, name='simulationdata',index=False, if_exists='replace')
con.commit()
engine3 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Errormsr.to_sql(con=engine3, name='simerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `simerror`")
sdata = cnr.fetchall()
simerror = pd.DataFrame(sdata)
con.close()
return render_template('ysimulation.html',sayy=1,simerror=simerror.to_html(index=False))
return render_template('ysimulation.html')
##PROCURMENT PLANNING
@app.route('/procurementplanning')
def procurementplanning():
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerate", methods=['GET','POST'])
def criteriagenerate():
if request.method == 'POST':
global cnmbr
global vnmbr
cnmbr = int(request.form['cnmbr'])
vnmbr = int(request.form['vnmbr'])
if cnmbr == 0 or vnmbr==0:
return render_template('criterianumberask.html',warning='Data Invalid')
cmainlist=[]
global cnames
cnames = []
for i in range (1,cnmbr+1):
lst=[]
name='cname'+str(i)
lst.append(i)
lst.append(name)
cmainlist.append(lst)
cnames.append(name)
vmainlist=[]
global vnames
vnames = []
for i in range (1,vnmbr+1):
lst=[]
name='vname'+str(i)
lst.append(i)
lst.append(name)
vmainlist.append(lst)
vnames.append(name)
return render_template('vendorselection_criteriagenerate.html',cmainlist=cmainlist,vmainlist=vmainlist)
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerated", methods=['GET','POST'])
def criteriagenerated():
if request.method == 'POST':
global criterianames
criterianames=[]
for name in cnames:
criterianame = request.form[name]
criterianames.append(criterianame)
global vendornames
vendornames=[]
for name in vnames:
vendorname = request.form[name]
vendornames.append(vendorname)
mcrlst=[]
cn=len(criterianames)
k=1
global maincriteriaoption
maincriteriaoption=[]
global maincritriacri
maincritriacri=[]
for i in range(cn-1):
for j in range (i+1,cn):
cri='criteriaorder'+str(k)
opt='coption'+str(k)
crlst=[k,cri,criterianames[i],criterianames[j],opt]
mcrlst.append(crlst)
k=k+1
maincriteriaoption.append(opt)
maincritriacri.append(cri)
mvrlst=[]
vn=len(vendornames)
k=1
global mainvendoroption
mainvendoroption=[]
global mainvendorcri
mainvendorcri=[]
for z in criterianames:
mvrlst1=[]
vcri=[]
vopt=[]
for i in range(vn-1):
for j in range (i+1,vn):
cri='vendororder'+z+str(k)
opt='voption'+z+str(k)
vrlst=[k,cri,vendornames[i],vendornames[j],opt]
mvrlst1.append(vrlst)
k=k+1
vcri.append(cri)
vopt.append(opt)
mvrlst.append(mvrlst1)
mainvendorcri.append(vcri)
mainvendoroption.append(vopt)
return render_template('vendorselection_maincriteria.html',mcrlst=mcrlst,mvrlst=mvrlst)
return render_template('vendorselection_criteriagenerated.html')
def tablecreator(imp,val,crit):
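# Builds an AHP pairwise-comparison matrix over `crit`: the diagonal is 1, each entered
# judgement fills one cell (inverted first when its "importance" flag is 1), and the
# mirrored cell gets the reciprocal, so a_ji = 1/a_ij.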
n=len(imp)
for i in range(n):
if imp[i]==1:
val[i]=float(1/val[i])
fdata=pd.DataFrame(columns=crit,index=crit)
i=0
k=0
for index in fdata.index:
j=0
for columns in fdata.columns:
if i==j:
fdata.loc[columns,index]=1
if i<j:
fdata.loc[columns,index]=round((float(val[k])),2)
fdata.loc[index,columns]=round((1/val[k]),2)
k=k+1
j=j+1
i=i+1
return fdata
@app.route("/criteriaread", methods=['GET','POST'])
def criteriaread():
if request.method == 'POST':
importances = []
values = []
for name1 in maincritriacri:
imp = int(request.form[name1])
importances.append(imp)
for name2 in maincriteriaoption:
val = int(request.form[name2])
values.append(val)
#global maincriteriadata
maincriteriadata=tablecreator(importances,values,criterianames)
mainimportances=[]
for crioption in mainvendorcri:
importance=[]
for option1 in crioption:
impc = int(request.form[option1])
importance.append(impc)
mainimportances.append(importance)
mainvalues=[]
for vendoroption in mainvendoroption:
vvalues=[]
for option2 in vendoroption:
valuev = int(request.form[option2])
vvalues.append(valuev)
mainvalues.append(vvalues)
maindf=[]
for z in range(len(criterianames)):
df=tablecreator(mainimportances[z],mainvalues[z],vendornames)
maindf.append(df)
dictmain={'crit':maincriteriadata}
names=criterianames
dfs=maindf
dictionary=dict((n,d) for (n,d) in zip(names,dfs))
def ahpmain(dictmain):
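# Approximate AHP priority vector: square the comparison matrix, take row sums and
# normalise them so the weights add up to 1 (a common shortcut for the principal
# eigenvector method).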
global wt_Crit
wt_Crit=[]
key=[]
key=list(dictmain.keys())
for i in key:
Crit=np.dot(dictmain[i],dictmain[i])
row_sum=[]
for j in range(len(Crit)):
row_sum.append(sum(Crit[j]))
wt_Crit.append([s/sum(row_sum) for s in row_sum])
Crit=[]
return wt_Crit
def ahp(dictmain,dictionary):
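# Final ranking: the vendor-weight vectors (one per criterion) are stacked column-wise and
# multiplied by the criteria-weight vector, giving one overall score per vendor.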
global output
main= ahpmain(dictmain)
submain= ahpmain(dictionary)
dd=pd.DataFrame(submain).T
df=pd.DataFrame(main).T
output=np.dot(dd,df)
return output,dd
yaxis,dd=ahp(dictmain,dictionary)
yax=pd.DataFrame(yaxis,index=vendornames,columns=['Score']).sort_values('Score',ascending=False).T
ynames=yax.columns
yval=yax.T.values
dd.index=vendornames
dd.columns=names
dd=dd.T
opq23=[]
for column in dd.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in dd[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
return render_template('vendorselection_ahp_final_output.html',ynames=ynames,yval=yval,dd=opq23,names=names)
return render_template('vendorselection_criteriagenerated.html')
#DETERMINISTIC STARTS
@app.route("/spt")
def spt():
return render_template('SinglePeriod.html')
@app.route("/ppbreak")
def ppbreak():
return render_template('pbreak.html')
@app.route('/pbrk', methods=['GET','POST'])
def pbrk():
return render_template('pbrk.html')
@app.route('/eoq', methods=['GET','POST'])
def eoq():
##Economic Order Quantity (EOQ): trades off ordering (setup) cost against holding cost
AnnulaUnitsDemand=100##annual demand for the product (units/year)
FixedCost=500 ##fixed ordering (setup) cost per order
AnnHoldingcost=0.25 ##annual holding cost rate (fraction of unit cost)
UnitCost=445 ##purchase cost per unit
LeadTime=10 ##days between placing an order and receiving it
SafetyStock=100##extra stock held to buffer demand variability
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
FixedCost=float(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=float(UnitCost)
LeadTime=float(LeadTime)
SafetyStock=float(SafetyStock)
sgap=1
pgap=1
HoldingCost=AnnHoldingcost*UnitCost
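# Classical EOQ: EOQ = sqrt(2*D*S / H), with D = annual demand, S = fixed cost per order
# and H = annual holding cost per unit (sgap = pgap = 1 here). With the defaults above
# (D=100, S=500, H=0.25*445=111.25) this gives sqrt(100000/111.25), about 30 units.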
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=round((EOQ*.75),0)
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totHoldCost+totOrderCost)
while (count < (EOQ*2)):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
# sstock=int(math.sqrt((LeadTime^2)+(int(ReorderPoint)^2)))
return render_template('eoq.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock)
########################EPQ (Economic Production Quantity)############
@app.route('/eproduction', methods=['GET','POST'])
def eproduction():
AnnulaUnitsDemand=100
Prodrate=125
FixedCost=500
AnnHoldingcost=0.1
UnitCost=25000
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
Prodrate=request.form['Prodrate']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
Prodrate=int(Prodrate)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
if(Prodrate<=AnnulaUnitsDemand):
return render_template('eproduction.html',warning='Production rate should not be less than the annual demand',
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
pgap=round((1-(AnnulaUnitsDemand/Prodrate)),2)
HoldingCost=float(AnnHoldingcost*UnitCost)
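# Economic Production Quantity: EPQ = sqrt(2*D*S / (H*(1 - D/P))), i.e. the EOQ formula
# with the holding cost scaled by pgap = 1 - demand/production rate. With the defaults
# above (D=100, P=125, S=500, H=0.1*25000=2500, pgap=0.2) this gives sqrt(100000/500),
# about 14.1 units.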
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totOrderCost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eproduction.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
######################End of EPQ############
@app.route('/eoq_backorders', methods=['GET','POST'])
def eoq_backorders():
AnnulaUnitsDemand=12000
shortcost=1.1
FixedCost=8000
AnnHoldingcost=0.3
UnitCost=1
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
shortcost=request.form['shortcost']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
shortcost=int(shortcost)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
HoldingCost=float(AnnHoldingcost*UnitCost)
sgap=(shortcost+HoldingCost)/shortcost
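# EOQ with planned backorders: Q* = sqrt(2*D*S/H) * sqrt((H+p)/p), where p is the shortage
# (backorder) cost; the maximum backorder level is Q* * H/(H+p), the `totbackorder` below.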
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/HoldingCost))*(math.sqrt(sgap)),2)
REOQ=round(math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost)*sgap),0)
totbackorder=EOQ*(HoldingCost/(shortcost+HoldingCost))
totOrderCost=round(((FixedCost*AnnulaUnitsDemand)/EOQ),2)
totHoldCost=round(((HoldingCost*((EOQ-totbackorder)**2))/(2*EOQ)),2)
totshortcost=round((shortcost*(totbackorder**2)/(2*EOQ)),2)
TotalCost=round((totOrderCost+totHoldCost+totshortcost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count= EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
shlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
shlist.append(totshortcost)
tclist.append(totOrderCost+totshortcost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eoq_backorders.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
shlist=shlist,sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,shortcost=shortcost,
LeadTime=LeadTime,SafetyStock=SafetyStock)
#################pbreak######################
@app.route("/pbreak_insert", methods=['GET','POST'])
def pbreak_insert():
if request.method == 'POST':
quantity = request.form.getlist("quantity[]")
price = request.form.getlist("price[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("CREATE TABLE IF NOT EXISTS `pbreaktable` (quantity int(8),price int(8))")
curr.execute("DELETE FROM `pbreaktable`")
conn.commit()
say=1
for i in range(len(quantity)):
quantity_clean = quantity[i]
price_clean = price[i]
if quantity_clean and price_clean:
curr.execute("INSERT INTO `pbreaktable`(`quantity`,`price`) VALUES('"+quantity_clean+"','"+price_clean+"')")
conn.commit()
else:
say=0
if say==0:
message="Some values were not inserted!"
else:
message="All values were inserted!"
return(message)
@app.route('/view', methods=['GET','POST'])
def view():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
return render_template('pbrk.html',username=username,ress =ress.to_html())
@app.route('/pbreakcalculate', methods=['GET','POST'])
def pbreakcalculate():
AnnulaUnitsDemand=10
FixedCost=1
AnnHoldingcost=0.1
UnitCost=445
LeadTime=10
SafetyStock=100
if request.method == 'POST':
if request.form['AnnulaUnitsDemand']:
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
if request.form['FixedCost']:
FixedCost=request.form['FixedCost']
FixedCost=float(FixedCost)
if request.form['AnnHoldingcost']:
AnnHoldingcost=request.form['AnnHoldingcost']
AnnHoldingcost=float(AnnHoldingcost)
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
conn.close()
datatable=pd.DataFrame(columns=['Quantity','Price','EOQ','TotalCost'])
mainlist=[]
Qu=ress['quantity']
Qm=0
for index, i in ress.iterrows():
tcl=[]
quantity = i['quantity']
price = i['price']
HoldingCost1=AnnHoldingcost*price
eoq1=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost1))),2)
REOQ=round(eoq1,0)
totOrderCost1=round((FixedCost*AnnulaUnitsDemand/eoq1),2)
totHoldCost1=round(((HoldingCost1*eoq1)/2),2)
totalcost1=float(round((totOrderCost1+totHoldCost1),2))
lst=[quantity,price,eoq1,totalcost1]
a=pd.DataFrame(lst).T
a.columns=['Quantity','Price','EOQ','TotalCost']
datatable=pd.concat([datatable,a],ignore_index=True)
name='TotalCost (Price='+str(a['Price'][0])+')'
tcl.append(name)
Qmin=1
Qmax=Qu[Qm]
qtylist2=[]
tclist1=[]
while (Qmin < Qmax):
qtylist2.append(Qmin)
tclist1.append(round((Qmin/2*HoldingCost1+AnnulaUnitsDemand/Qmin*FixedCost),2))
Qmin +=2
Qmin=Qmax+1
qtylist2.append(eoq1)
tclist1.append(totalcost1)
tcl.append(tclist1)
mainlist.append(tcl)
Eu=datatable['EOQ']
Qu=datatable['Quantity']
Tu=datatable['TotalCost']
minlst=[]
for i in range(len(Eu)):
if i ==0:
if Eu[i]<=Qu[i]:
minlst.append(i)
else:
if Eu[i]<=Qu[i] and Eu[i]>Qu[i-1]:
minlst.append(i)
if len(minlst)==0:
minnval='Solution not feasible'
else:
minval=Tu[minlst[0]]
minnval=Eu[minlst[0]]
for j in minlst:
if Tu[j]<minval:
minval=Tu[j]
minnval=Eu[j]
val1=0
for i in range(len(tclist1)):
if (round(minnval))==qtylist2[i]:
val1=i
minival=round(minval)
minnival=round(minnval)
NumOrders=round((AnnulaUnitsDemand/minnval),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
return render_template('pbreak.html',
NumOrders=NumOrders,OrderTime=OrderTime,REOQ=REOQ,ReorderPoint=ReorderPoint,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock,minnval=minnval,minval=minval,minival=minival,minnival=minnival,
datatable=datatable.to_html(index=False),mainlist=mainlist,
val1=val1,tclist1=tclist1,qtylist2=qtylist2)
#################Demand problalstic######################
@app.route('/demand', methods=['GET', 'POST'])
def demand():
cost=10
price=12
salvage=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
cdf=[]
sum=0
for row in data['Prob']:
sum=sum+row
cdf.append(sum)
cumm_freq=(pd.DataFrame(cdf)).values##y-axis
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
k=[row>CSl for row in cumm_freq]
count=1
for row in k:
if row==False:
count=count+1
demand=(data['Demand']).values
w=data['Demand'].loc[count]##line across x-axis
val=0
for i in range(len(cumm_freq)):
if(w==demand[i]):
val=i
return render_template('demand.html',cost=cost,price=price,salvage=salvage,
cumm_freq=cumm_freq,demand=demand,val=val)
@app.route('/normal', methods=['GET', 'POST'])
def normal():
cost=10
price=12
salvage=9
sd=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost1=cost-salvage
undercost1=price-cost
CSl=undercost1/(undercost1+overcost1)
zz=st.norm.ppf(CSl)##x-line
z=float(format(zz, '.2f'))
# Expecteddemand=round(mea+(z*sd))
mean = 0; sd = 1; variance = np.square(sd)
x = np.arange(-4,4,.01)##x-axis
f =(np.exp(-np.square(x-mean)/(2*variance))/(np.sqrt(2*np.pi*variance)))##y-axis
val=0
for i in range(len(f)):
if(z==round((x[i]),2)):
val=i
return render_template('normal.html',x=x,f=f,val=val,cost=cost,price=price,salvage=salvage)
@app.route('/utype', methods=['GET','POST'])
def utype():
cost=10
price=12
salvage=2
mini=1
maxi=10
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
mini=request.form['mini']
maxi=request.form['maxi']
cost=int(cost)
price=int(price)
salvage=int(salvage)
mini=int(mini)
maxi=int(maxi)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
expdemand1=round(mini+((maxi-mini)*CSl))
# a=[mini,0]
# b=[mini,100]
# c=[maxi,0]
# d=[maxi,100]
# width = c[0] - b[0]
# height = d[1] - a[1]
lims = np.arange(0,maxi,1)
val=0
for i in range(len(lims)):
if(expdemand1==lims[i]):
val=i
return render_template('utype.html',x=lims,f=lims,val=val,cost=cost,price=price,salvage=salvage,mini=mini,maxi=maxi)
@app.route('/outputx', methods=['GET', 'POST'])
def outputx():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM `abc`")
all_data = cur.fetchall()
all_data = pd.DataFrame(all_data)
A_ccat=.8
B_ccat=.95
A_ucat=.1
B_ucat=.25
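# ABC cut-offs: items are ranked by share of total cost; class A while the cumulative cost
# share stays <= 80% and the cumulative usage share <= 10%, class B up to 95% / 25%,
# class C otherwise.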
tot_cost=all_data['Cost'].sum()
tot_usage=all_data['Annual Usage'].sum()
all_data['perc_cost']=all_data['Cost']/tot_cost
all_data['perc_usage']=all_data['Annual Usage']/tot_usage
all_data.sort_values(by=['perc_cost'], inplace=True, ascending=False)
sort_data=all_data.reset_index()
sort_data['cum_cperc']=np.nan
sort_data['cum_uperc']=np.nan
sort_data['Class']=''
for i in range(len(sort_data)):
if(i==0):
sort_data.at[i,'cum_cperc']=sort_data['perc_cost'][i]
sort_data.at[i,'cum_uperc']=sort_data['perc_usage'][i]
# cperc_data.append(all_data['perc_cost'][i])
sort_data.at[i,'Class']='A'
else:
sort_data.at[i,'cum_cperc']=sort_data['perc_cost'][i]+sort_data['cum_cperc'][i-1]
sort_data.at[i,'cum_uperc']=sort_data['perc_usage'][i]+sort_data['cum_uperc'][i-1]
if(sort_data['cum_cperc'][i]<=A_ccat and sort_data['cum_uperc'][i]<=A_ucat):
sort_data.at[i,'Class']='A'
elif(sort_data['cum_cperc'][i]<=B_ccat and sort_data['cum_uperc'][i]<=B_ucat):
sort_data.at[i,'Class']='B'
else:
sort_data.at[i,'Class']='C'
x7=sort_data[['cum_cperc']]
x1=x7*100
x3=np.round(x1)
x2=np.array([])
x5 = np.append(x2,x3)
y7= sort_data[['cum_uperc']]
y1=y7*100
y3=np.round(y1)
y2=np.array([])
y5 = np.append(y2,y3)
###############% of Total cost//
a= sort_data[(sort_data['Class']=='A')][['perc_cost']]
j=a.sum()
k=j*100
pd.DataFrame(k)
kf=k[0]
b= sort_data[(sort_data['Class']=='B')][['perc_cost']]
n=b.sum()
m=n*100
pd.DataFrame(m)
mf=m[0]
c= sort_data[(sort_data['Class']=='C')][['perc_cost']]
o=c.sum()
p=o*100
pd.DataFrame(p)
pf=p[0]
tes=k,m,p
t2 = np.array([])
te2 = np.append(t2,tes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f= sort_data[(sort_data['Class']=='A')][['Product number']]
v=f.sum()
pd.DataFrame(v)
vif=v[0]
f1= sort_data[(sort_data['Class']=='B')][['Product number']]
u=f1.sum()
pd.DataFrame(u)
uif=u[0]
f2= sort_data[(sort_data['Class']=='C')][['Product number']]
vf=f2.sum()
pd.DataFrame(vf)
kif=vf[0]
#################% of Total units // Annual Usage
t= sort_data[(sort_data['Class']=='A')][['perc_usage']]
i=t.sum()
p1=i*100
pd.DataFrame(p1)
nf=p1[0]
l= sort_data[(sort_data['Class']=='B')][['perc_usage']]
t=l.sum()
q1=t*100
pd.DataFrame(q1)
qf=q1[0]
u= sort_data[(sort_data['Class']=='C')][['perc_usage']]
w=u.sum()
s1=w*100
pd.DataFrame(s1)
sf=s1[0]
test=p1,q1,s1
tt2 = np.array([])
tte2 = np.append(tt2,test)
#############values//Cost*Annual Usage
sort_data['Value'] = sort_data['Cost'] * sort_data['Annual Usage']
fz= sort_data[(sort_data['Class']=='A')][['Value']]
vz=fz.sum()
pd.DataFrame(vz)
vzz=vz[0]
fz1= sort_data[(sort_data['Class']=='B')][['Value']]
uz=fz1.sum()
pd.DataFrame(uz)
uzf=uz[0]
fz2= sort_data[(sort_data['Class']=='C')][['Value']]
vzf=fz2.sum()
pd.DataFrame(vzf)
kzf=vzf[0]
h=[{'Scenario':'A','Values':vzz,'product number':vif,'perc_usage':nf,'perc_cost ':kf},
{'Scenario':'B','Values':uzf,'product number':uif,'perc_usage':qf,'perc_cost ':mf},
{'Scenario':'C','Values':kzf,'product number':kif,'perc_usage':sf,'perc_cost ':pf}]
df = pd.DataFrame(h)
lo=sort_data[['Product Description','Product number','Cost','Annual Usage','Class']]
cur = conn.cursor()
cur.execute("SELECT * FROM `abc1`")
all_data4 = cur.fetchall()
all_data4 = pd.DataFrame(all_data4)
lolz=all_data4[['Product number','Product Description','Cost','Annual Usage','Average Stay','Average Consumption','Criticality']]
######################FSN classification (Fast / Slow / Non-moving)#########################
curr = conn.cursor()
curr.execute("SELECT * FROM `fsn`")
all_data1 = curr.fetchall()
all_data1 = pd.DataFrame(all_data1)
F_cat=.2
S_cat=.5
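# FSN cut-offs: items are sorted by increasing share of average stay; the first 20% of the
# cumulative stay is classed F (fast moving), up to 50% S (slow moving), the rest N (non-moving).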
tot_stay=all_data1['Average Stay'].sum()
tot_consupt=all_data1['Average Consumption'].sum()
all_data1['perc_stay']=all_data1['Average Stay']/tot_stay
all_data1['perc_cons']=all_data1['Average Consumption']/tot_consupt
all_data1.sort_values(by=['perc_stay'], inplace=True, ascending=True)
sort_data1=all_data1.reset_index()
sort_data1['cum_stay']=np.nan
sort_data1['cum_cons']=np.nan
sort_data1['Class']=''
for i in range(len(sort_data1)):
if(i==0):
sort_data1.at[i,'cum_stay']=sort_data1['perc_stay'][i]
sort_data1.at[i,'cum_cons']=sort_data1['perc_cons'][i]
sort_data1.at[i,'Class']='F'
else:
sort_data1.at[i,'cum_stay']=sort_data1['perc_stay'][i]+sort_data1['cum_stay'][i-1]
sort_data1.at[i,'cum_cons']=sort_data1['perc_cons'][i]+sort_data1['cum_cons'][i-1]
if(sort_data1['cum_stay'][i]<=F_cat):
sort_data1.at[i,'Class']='F'
elif(sort_data1['cum_stay'][i]<=S_cat):
sort_data1.at[i,'Class']='S'
else:
sort_data1.at[i,'Class']='N'
x71=sort_data1[['cum_stay']]
x11=x71*100
x31=np.round(x11)
x21=np.array([])
x51 = np.append(x21,x31)
y71= sort_data1[['cum_cons']]
y11=y71*100
y31=np.round(y11)
y21=np.array([])
y51 = np.append(y21,y31)
###############% of Total cost//
a1= sort_data1[(sort_data1['Class']=='F')][['perc_stay']]
j1=a1.sum()
k1=j1*100
pd.DataFrame(k1)
kf1=k1[0]
b1= sort_data1[(sort_data1['Class']=='S')][['perc_stay']]
n1=b1.sum()
m1=n1*100
pd.DataFrame(m1)
mf1=m1[0]
c1= sort_data1[(sort_data1['Class']=='N')][['perc_stay']]
o1=c1.sum()
p1=o1*100
pd.DataFrame(p1)
pf1=p1[0]
tes1=k1,m1,p1
t21 = np.array([])
te21 = np.append(t21,tes1)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f1= sort_data1[(sort_data1['Class']=='F')][['Product number']]
v1=f1.sum()
pd.DataFrame(v1)
vif1=v1[0]
f11= sort_data1[(sort_data1['Class']=='S')][['Product number']]
u1=f11.sum()
pd.DataFrame(u1)
uif1=u1[0]
f21= sort_data1[(sort_data1['Class']=='N')][['Product number']]
vf1=f21.sum()
pd.DataFrame(vf1)
kif1=vf1[0]
#################% of Total units // Annual Usage
t1= sort_data1[(sort_data1['Class']=='F')][['perc_cons']]
i1=t1.sum()
p11=i1*100
pd.DataFrame(p11)
nf1=p11[0]
l1= sort_data1[(sort_data1['Class']=='S')][['perc_cons']]
t1=l1.sum()
q11=t1*100
pd.DataFrame(q11)
qf1=q11[0]
u1= sort_data1[(sort_data1['Class']=='N')][['perc_cons']]
w1=u1.sum()
s11=w1*100
pd.DataFrame(s11)
sf1=s11[0]
test1=p11,q11,s11
tt21 = np.array([])
tte21 = np.append(tt21,test1)
#############values//Cost*Annual Usage
sort_data1['Value'] = sort_data1['Average Stay'] * sort_data1['Average Consumption']
fz1= sort_data1[(sort_data1['Class']=='F')][['Value']]
vz1=fz1.sum()
pd.DataFrame(vz1)
vzz1=vz1[0]
fz11= sort_data1[(sort_data1['Class']=='S')][['Value']]
uz1=fz11.sum()
pd.DataFrame(uz1)
uzf1=uz1[0]
fz21= sort_data1[(sort_data1['Class']=='N')][['Value']]
vzf1=fz21.sum()
pd.DataFrame(vzf1)
kzf1=vzf1[0]
h1=[{'Scenario':'F','Values':vzz1,'product number':vif1,'perc_cons':nf1,'perc_stay ':kf1},
{'Scenario':'S','Values':uzf1,'product number':uif1,'perc_cons':qf1,'perc_stay ':mf1},
{'Scenario':'N','Values':kzf1,'product number':kif1,'perc_cons':sf1,'perc_stay ':pf1}]
df1 = pd.DataFrame(h1)
lo1=sort_data1[['Product Description','Product number','perc_stay','perc_cons','Class']]
##############VED classification (Vital / Essential / Desirable)#########
cur1 = conn.cursor()
cur1.execute("SELECT * FROM `ved`")
all_data2 = cur1.fetchall()
all_data2 = pd.DataFrame(all_data2)
all_data2['values']=all_data2['Class'] + all_data2["Criticality"]
AV= all_data2[(all_data2['values']=='AV')]
AV=AV.index.max()
AE= all_data2[(all_data2['values']=='AE')]
AE= AE.index.max()
AE=np.nan_to_num(AE)
AD= all_data2[(all_data2['values']=='AD')]
AD=AD.index.max()
AD=np.nan_to_num(AD)
BV=all_data2[(all_data2['values']=='BV')]
BV=BV.index.max()
BE=all_data2[(all_data2['values']=='BE')]
BE=BE.index.max()
BD=all_data2[(all_data2['values']=='BD')]
BD=BD.index.max()
BD=np.nan_to_num(BD)
CV=all_data2[(all_data2['values']=='CV')]
CV=CV.index.max()
CV=np.nan_to_num(CV)
CE=all_data2[(all_data2['values']=='CE')]
CE=CE.index.max()
CD=all_data2[(all_data2['values']=='CD')]
CD=CD.index.max()
###############################################
xx71=all_data2[['cum_cperc']]
xx71=xx71.astype(float)
xx11=xx71*100
xx31=xx11.round()
xx21=np.array([])
xx51 = np.append(xx21,xx31)
yy71= all_data2[['cum_uperc']]
yy71=yy71.astype(float)
yy11=yy71*100
yy31=yy11.round(0)
yy21=np.array([])
yy51 = np.append(yy21,yy31)
###############% of Total cost//
aa= all_data2[(all_data2['Criticality']=='V')][['perc_cost']]
jj=aa.sum()
kk=jj*100
#k=pd.DataFrame(k)
kkf=kk[0]
bb= all_data2[(all_data2['Criticality']=='E')][['perc_cost']]
nn=bb.sum()
mm=nn*100
# m=pd.DataFrame(m)
mmf=mm[0]
cc= all_data2[(all_data2['Criticality']=='D')][['perc_cost']]
oo=cc.sum()
pp=oo*100
# p=pd.DataFrame(p)
ppf=pp[0]
ttes=[kk,mm,pp]
ttes=pd.concat(ttes)
th2 = np.array([])
the2 = np.append(th2,ttes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
ff= all_data2[(all_data2['Criticality']=='V')][['Product number']]
vv=ff.sum()
pd.DataFrame(vv)
vvif=vv[0]
ff1= all_data2[(all_data2['Criticality']=='E')][['Product number']]
uu=ff1.sum()
pd.DataFrame(uu)
uuif=uu[0]
ff2= all_data2[(all_data2['Criticality']=='D')][['Product number']]
vvf=ff2.sum()
pd.DataFrame(vvf)
kkif=vvf[0]
#################% of Total units // Annual Usage
tt= all_data2[(all_data2['Criticality']=='V')][['perc_usage']]
ii=tt.sum()
pp1=ii*100
pd.DataFrame(pp1)
nnf=pp1[0]
ll= all_data2[(all_data2['Criticality']=='E')][['perc_usage']]
tq=ll.sum()
qq1=tq*100
pd.DataFrame(qq1)
qqf=qq1[0]
uw= all_data2[(all_data2['Criticality']=='D')][['perc_usage']]
wu=uw.sum()
sc1=wu*100
pd.DataFrame(sc1)
ssf=sc1[0]
testt=[pp1,qq1,sc1]
testt=pd.concat(testt)
ttt2 = np.array([])
ttte2 = np.append(ttt2,testt)
#############values//Cost*Annual Usage
all_data2['Value'] = all_data2['Cost'] * all_data2['Annual Usage']
fzz= all_data2[(all_data2['Criticality']=='V')][['Value']]
vzz=fzz.sum()
pd.DataFrame(vzz)
vzzz=vzz[0]
fzz1= all_data2[(all_data2['Criticality']=='E')][['Value']]
uzz=fzz1.sum()
pd.DataFrame(uzz)
uzzf=uzz[0]
fzz2= all_data2[(all_data2['Criticality']=='D')][['Value']]
vzzf=fzz2.sum()
pd.DataFrame(vzzf)
kzzf=vzzf[0]
hh=[{'Scenario':'V','Values':vzzz,'product number':vvif,'perc_usage':nnf,'perc_cost ':kkf},
{'Scenario':'E','Values':uzzf,'product number':uuif,'perc_usage':qqf,'perc_cost ':mmf},
{'Scenario':'D','Values':kzzf,'product number':kkif,'perc_usage':ssf,'perc_cost ':ppf}]
dff = pd.DataFrame(hh)
return render_template('inventoryclassification.html',
x=y5,y=x5,
barcost=te2 ,barusage=tte21,
s=df.to_html(index=False),
sam=lo.to_html(index=False),
tale=lolz.to_html(index=False),
x1=x51,y1=y51,
bar1=te21 ,bar2=tte2,
s1=df1.to_html(index=False),
sam1=lo1.to_html(index=False),
xx1=AV,xx2=AE,xx3=AD,
yy1=BV,yy2=BE,yy3=BD,
zz1=CV,zz2=CE,zz3=CD,
bb1=the2 ,bb2=ttte2,
zone1=yy51,zone2=xx51,
sammy=dff.to_html(index=False))
@app.route('/vendormanagement')
def vendormanagement():
return render_template('vendormanagement.html')
@app.route('/vendormanagementimport',methods=['POST','GET'])
def vendormanagementimport():
global vendordata
global vendordataview
db = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
vendordata = pd.read_sql("SELECT * from vendor_management", con=db)
db.close()
vendordata['POdate']=pd.to_datetime(vendordata['POdate'])
vendordata['POdate_year']=vendordata['POdate'].dt.year
vendordataview=vendordata.head(50)
return render_template('vendormanagementview.html',vendordataview=vendordataview.to_html(index=False))
@app.route('/vendormanagementview',methods=['POST','GET'])
def vendormanagementview():
return render_template('vendormanagementview.html',vendordataview=vendordataview.to_html(index=False))
@app.route('/vndrmngmnt1',methods=['POST','GET'])
def vndrmngmnt1():
VENDORID=sorted(vendordata['Vendorid'].unique())
if request.method=='POST':
vendorin=request.form['name1']
def Vendor(VendorId):
datasetcomb34=vendordata[['Vendorid','Vendor_name','Vendor_address','Vendormin_order']][vendordata['Vendorid']== VendorId]
return datasetcomb34.iloc[0,:]
snglvw=Vendor(vendorin)
singleview=pd.DataFrame(snglvw).T
return render_template('vendormanagement1.html',say=1,vendorin=vendorin,VENDORID=VENDORID,singleview=singleview.to_html(index=False))
return render_template('vendormanagement1.html',VENDORID=VENDORID)
@app.route('/vndrmngmnt2',methods=['POST','GET'])
def vndrmngmnt2():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10vendorspend(year,top_value):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['PO_Value'].sum()
x=gg1.nlargest(top_value).index.get_level_values(1)
y=gg1.nlargest(top_value).values
df=pd.DataFrame({'VendorID':x,'Total':y})
return df
vndrvspnd=top10vendorspend(SelectedYear,SelectedTop)
def top10vendoravgspend(top):
gg3=vendordata.groupby(['POdate_year','Vendorid'])['PO_Value'].mean()
xxx=gg3.nlargest(top).index.get_level_values(1)
yyy=round(gg3.nlargest(top),2).values
df=pd.DataFrame({'VendorID':xxx,'Mean':yyy})
return df
vndrvavgspnd=top10vendoravgspend(SelectedTop)
return render_template('vendormanagement2.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrvspnd.values,vndrvavg=vndrvavgspnd.values)
return render_template('vendormanagement2.html',pouyear=pouyear)
@app.route('/vndrmngmnt3',methods=['POST','GET'])
def vndrmngmnt3():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10POvendorvalue(year,top_value):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['Inventoryreplenished'].sum()
x=gg1.nlargest(top_value).index.get_level_values(1)
y=gg1.nlargest(top_value).values
df=pd.DataFrame({'VendorId':x,'Total':y})
return df
vndrval=top10POvendorvalue(SelectedYear,SelectedTop)
def top10POvendoravg(top):
gg3=vendordata.groupby(['POdate_year','Vendorid'])['Inventoryreplenished'].mean()
xxx=gg3.nlargest(top).index.get_level_values(1)
yyy=round(gg3.nlargest(top),2).values
df=pd.DataFrame({'VendorID':xxx,'Mean':yyy})
return df
vndrvavg=top10POvendoravg(SelectedTop)
return render_template('vendormanagement3.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrval.values,vndrvavg=vndrvavg.values)
return render_template('vendormanagement3.html',pouyear=pouyear)
@app.route('/vndrmngmnt4',methods=['POST','GET'])
def vndrmngmnt4():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10vendorPOcnt(year,top):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['POdate_year'].count()
x=gg1.nlargest(top).index.get_level_values(1)
y=gg1.nlargest(top).values
            df=pd.DataFrame({'VendorID':x,'Total_count':y})
return df
vndrvavgpoacnt=top10vendorPOcnt(SelectedYear,SelectedTop)
def top10vendorPOavg(top):
g=vendordata.groupby('Vendorid')['POdate_year'].size()
xx=g.nlargest(top).index.get_level_values(0)
yy=g.nlargest(top).values
dfexp7=pd.DataFrame({'VendorID':xx,'Average_count':yy})
return dfexp7
vndrvavgpoavg=top10vendorPOavg(SelectedTop)
return render_template('vendormanagement4.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrvavgpoacnt.values,vndrvavg=vndrvavgpoavg.values)
return render_template('vendormanagement4.html',pouyear=pouyear)
@app.route('/vendorperformanceanalysis')
def vendorperformanceanalysis():
return render_template('vendorperformanceanalysis.html',say=0)
@app.route('/vendorperformanceanalysisdata',methods=['POST','GET'])
def vendorperformanceanalysisdata():
if request.method=='POST':
global wdata
global wtdata
file1 = request.files['file1'].read()
file2 = request.files['file2'].read()
if len(file1)==0 or len(file2)==0:
return render_template('vendorperformanceanalysis.html',say=0,warning='Data Invalid')
data1=pd.read_csv(io.StringIO(file1.decode('utf-8')))
wdata=pd.DataFrame(data1)
data2=pd.read_csv(io.StringIO(file2.decode('utf-8')))
wtdata=pd.DataFrame(data2)
return render_template('vendorperformanceanalysis.html',say=1,data1=data1.to_html(index=False),data2=data2.to_html(index=False))
@app.route('/vendorperformanceanalys',methods=['POST','GET'])
def vendorperformanceanalys():
wt=[]
for ds in wtdata['Weight']:
wt.append(round((float(ds)),2))
treatment=[]
for ds in wtdata['Positive Attribute']:
if ds=='Yes':
treatment.append('+')
else:
treatment.append('-')
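    # Weighted-scoring sketch of what normalize() below does: negative ("-")
    # attributes are rescaled as min/value, positive ("+") attributes as
    # value/max, each column is then multiplied by its weight, and the row-wise
    # sum becomes the vendor score that is ranked in descending order.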
    def normalize(df, weights, treatment):
y=df.iloc[:,1:len(list(df))]
for i, j in zip(list(y),treatment):
if j== '-':
y[i]=y[i].min()/y[i]
elif j== '+':
y[i]=y[i]/y[i].max()
        for i, t in zip(list(y), weights):
y[i]=y[i]*t
df['Score'] = y.sum(axis=1)
df=df.sort_values('Score', ascending=False)
df['Rank']=df['Score'].rank(ascending=False)
df['Rank']=df['Rank'].astype(int)
return df[['Rank','Vendor']]
dff=normalize(wdata,wt,treatment)
return render_template('vendorperformanceanalysisview.html',say=1,data=dff.to_html(index=False))
@app.route('/purchaseorderallocation')
def purchaseorderallocation():
return render_template('purchaseorderallocation.html')
@app.route('/purchaseorderallocationimport',methods=['POST','GET'])
def purchaseorderallocationimport():
global ddemand1
global dsupply1
global maxy1
global miny1
global Vcost1
global Vrisk1
db = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
ddemand1 = pd.read_sql("SELECT * from opt_demand", con=db)
dsupply1 = pd.read_sql("SELECT * from opt_supply", con=db)
maxy1 = pd.read_sql("SELECT * from opt_maxcapacity", con=db)
miny1 = pd.read_sql("SELECT * from opt_mincapacity", con=db)
Vcost1 = pd.read_sql("SELECT * from opt_vcost", con=db)
Vrisk1 = pd.read_sql("SELECT * from opt_vrisk", con=db)
db.close()
return render_template('purchaseorderallocationimport.html',ddemand=ddemand1.to_html(index=False),dsupply=dsupply1.to_html(index=False),
maxy=maxy1.to_html(index=False),miny=miny1.to_html(index=False),Vcost=Vcost1.to_html(index=False),Vrisk=Vrisk1.to_html(index=False))
@app.route('/purchaseorderallocationanalyse',methods=['POST','GET'])
def purchaseorderallocationanalyse():
ddemand=ddemand1.set_index("Product")
dsupply=dsupply1.set_index("Vendor")
maxy=maxy1.set_index("Vendors\Product List")
miny=miny1.set_index("Vendors\Product List")
Vcost =Vcost1.set_index("Vendors\Product List")
Vrisk = Vrisk1.set_index("Vendors\Product List")
demand=dict(zip(list(ddemand.index),ddemand.iloc[:,0].values))
supply=dict(zip(list(dsupply.index),dsupply.iloc[:,0].values))
max1=maxy.to_dict()
min1=miny.to_dict()
Vendors=list(dsupply.index)
Products=list(ddemand.index)
VcostNorm = Vcost.copy()
VriskNorm = Vrisk.copy()
if request.method=='POST':
CostWeight=float(request.form['CostWeight'])
RiskWeight=float(request.form['RiskWeight'])
Total=[]
for i in list(list(VcostNorm)):
Tot = VcostNorm[i].sum()
Total.append(Tot)
for i, j in zip(list(VcostNorm),Total):
VcostNorm[i]=VcostNorm[i]/j
Total=[]
for i in list(list(VriskNorm)):
Tot = VriskNorm[i].sum()
Total.append(Tot)
for i, j in zip(list(VriskNorm),Total):
VriskNorm[i]=VriskNorm[i]/j
risk=VriskNorm.to_dict()
cost=VcostNorm.to_dict()
        Total_Cost=pd.DataFrame(CostWeight*pd.DataFrame(cost))
        Total_Risk=pd.DataFrame(RiskWeight*pd.DataFrame(risk))
Decision_var=(Total_Cost+Total_Risk).to_dict()
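        # PuLP model sketch: x[product][vendor] is the quantity allocated to a
        # vendor; the objective minimizes the weighted, normalized cost + risk;
        # the constraints meet each product's demand exactly, keep every vendor
        # within its total supply, and respect the per-product min/max capacities.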
prob = pulp.LpProblem("Optimization", pulp.LpMinimize)
routes = [(w,b) for w in Products for b in Vendors]
        x = LpVariable.dicts("route", (Products, Vendors), cat='Integer')
prob += lpSum([x[w][b] * Decision_var[w][b] for (w,b) in routes]),"Objective function"
for w in Products:
prob += lpSum([x[w][b] for b in Vendors]) == demand[w]
for b in Vendors:
prob += lpSum([x[w][b] for w in Products]) <= supply[b]
for w in Products:
for b in Vendors:
prob += x[w][b] <= max1[w][b]
for w in Products:
for b in Vendors:
prob += x[w][b] >= min1[w][b]
prob.writeLP("SO.lp")
prob.solve()
opt_status=pulp.LpStatus[prob.status]
if opt_status=='Optimal':
#print (pulp.value(prob.objective))
re=[]
res=[]
ress=[]
i=0
for variable in prob.variables():
re.append(variable.varValue)
res.append(variable.varValue)
i=i+1
if (i==len(Total_Cost)):
i=0
ress.append(re)
re=[]
Optimal_quantity1=pd.DataFrame(ress,columns=Vendors,index=Products).astype(int)
opq13=[]
for column in Optimal_quantity1.columns:
opq11=[]
opq12=[]
opq11.append(column)
for val in Optimal_quantity1[column]:
opq12.append(val)
opq11.append(opq12)
opq13.append(opq11)
Optimal_quantity2=Optimal_quantity1.T
opq23=[]
for column in Optimal_quantity2.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in Optimal_quantity2[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
VCran=[]
for column in Vcost.columns:
for val in Vcost[column].values:
VCran.append(val)
VRran=[]
for column in Vrisk.columns:
for val in Vrisk[column].values:
VRran.append(val)
Costproduct=[i*j for (i,j) in zip(res,VCran)]
sumCostproduct=sum(Costproduct)
Riskproduct=[i*j for (i,j) in zip(res,VRran)]
optrisk=sum(Riskproduct)/sum(res)
return render_template('purchaseorderallocationoutput.html',username=username,say=1,optrisk=optrisk,sumCostproduct=sumCostproduct,Optimal_quantity1=opq13,
Optimal_quantity2=opq23,grpi1=Optimal_quantity1.index,grpi2=Optimal_quantity2.index,warning2="The obtained solution was "+opt_status)
return render_template('purchaseorderallocationoutput.html',warning1="The obtained solution was "+opt_status)
return render_template('purchaseorderallocationoutput.html')
@app.route('/purchaseordermanagement')
def purchaseordermanagement():
return render_template('purchaseordermanagement.html')
@app.route('/poimport',methods=['POST','GET'])
def poimport():
global podata
global podatahead
db = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
podata = pd.read_sql("SELECT * from po_management", con=db)
db.close()
podata['POdate']=pd.to_datetime(podata['POdate'])
podata['PO_year']=podata['POdate'].dt.year
podata['Orderreceiveddate']=pd.to_datetime(podata['Orderreceiveddate'])
podata['Orderreceivedyear']=podata['Orderreceiveddate'].dt.year
podatahead=podata.head(50)
return render_template('purchaseordermanagementview.html',podatahead=podatahead.to_html(index=False))
@app.route('/purchaseordermanagementview')
def purchaseordermanagementview():
return render_template('purchaseordermanagementview.html',podatahead=podatahead.to_html(index=False))
@app.route('/pomtype1',methods=['POST','GET'])
def pomtype1():
PONO=sorted(podata['POno'].unique())
if request.method=='POST':
SelectedPOno=int(request.form['name1'])
def POSingle(POno):
podat=podata[['POno','POammendmentdate','POdate','POverificationdate','PO_Value']][podata['POno']== POno]
return podat.iloc[0,:]
snglvw=POSingle(SelectedPOno)
svpodata=pd.DataFrame(snglvw).T
return render_template('purchaseordermanagement1.html',say=1,sayy=1,PONO=PONO,svpodata=svpodata.to_html(index=False),SelectedPOno=SelectedPOno)
return render_template('purchaseordermanagement1.html',say=1,PONO=PONO)
@app.route('/pomtype2',methods=['POST','GET'])
def pomtype2():
uyear=sorted(podata['PO_year'].unique())
if request.method=='POST':
SelectedYear=int(request.form['name1'])
podata.loc[(podata.PO_Value >= 0) & (podata.PO_Value < 10000), 'PO_Group'] = '0-10K'
podata.loc[(podata.PO_Value >= 10000) & (podata.PO_Value < 50000), 'PO_Group'] = '10K-50K'
podata.loc[(podata.PO_Value >= 50000) & (podata.PO_Value < 100000), 'PO_Group'] = '50K-100K'
podata.loc[(podata.PO_Value >= 100000) & (podata.PO_Value < 500000), 'PO_Group'] = '100K-500K'
podata.loc[(podata.PO_Value >= 500000) & (podata.PO_Value < 1000000), 'PO_Group'] = '500K-1M'
podata.loc[podata.PO_Value >= 1000000, 'PO_Group'] = '>1M'
podata.loc[podata.PO_Group == '0-10K', 'PO_GroupNo'] = 1
podata.loc[podata.PO_Group == '10K-50K', 'PO_GroupNo'] = 2
podata.loc[podata.PO_Group == '50K-100K', 'PO_GroupNo'] = 3
podata.loc[podata.PO_Group == '100K-500K', 'PO_GroupNo'] = 4
podata.loc[podata.PO_Group == '500K-1M', 'PO_GroupNo'] = 5
podata.loc[podata.PO_Group == '>1M', 'PO_GroupNo'] = 6
def top10POyrcount(year):
x=[]
y=[]
gg1=podata[(podata['PO_year']==year)].groupby(['PO_year','PO_GroupNo','PO_Group'])['PO_year'].size()
x=gg1.index.get_level_values(2)
z=gg1.index.get_level_values(1)
y=gg1.values
df=pd.DataFrame({'z':z, 'PO Value':x,'Total Count':y})
df=df.sort_values('z')
df=df.drop('z',axis=1)
return df
df=top10POyrcount(SelectedYear)
return render_template('purchaseordermanagement2.html',say=1,sayy=1,uyear=uyear,data=df.values,SelectedYear=SelectedYear)
return render_template('purchaseordermanagement2.html',say=1,uyear=uyear)
@app.route('/pomtype3',methods=['POST','GET'])
def pomtype3():
uyear=sorted(podata['PO_year'].unique())
if request.method=='POST':
SelectedYear=int(request.form['name1'])
podata.loc[(podata.Inventoryreplenished >= 0) & (podata.Inventoryreplenished < 100), 'Inventory_Group'] = '0-100'
podata.loc[(podata.Inventoryreplenished >= 100) & (podata.Inventoryreplenished < 200), 'Inventory_Group'] = '100-200'
podata.loc[(podata.Inventoryreplenished >= 200) & (podata.Inventoryreplenished < 300), 'Inventory_Group'] = '200-300'
podata.loc[(podata.Inventoryreplenished >= 300) & (podata.Inventoryreplenished < 400), 'Inventory_Group'] = '300-400'
podata.loc[(podata.Inventoryreplenished >= 400) & (podata.Inventoryreplenished < 500), 'Inventory_Group'] = '400-500'
podata.loc[podata.Inventoryreplenished >= 500,'Inventory_Group'] = '>500'
def top10poinvyrcount(year):
x=[]
y=[]
gg1=podata[(podata['PO_year']==year)].groupby(['PO_year','Inventory_Group'])['Inventory_Group'].size()
x=gg1.index.get_level_values(1)
y=gg1.values
df=pd.DataFrame({'Inventory Value':x,'Total Count':y})
df=df.sort_values('Inventory Value')
return df
df=top10poinvyrcount(SelectedYear)
return render_template('purchaseordermanagement3.html',say=1,sayy=1,uyear=uyear,data=df.values,SelectedYear=SelectedYear)
return render_template('purchaseordermanagement3.html',say=1,uyear=uyear)
@app.route('/pomtype5',methods=['POST','GET'])
def pomtype5():
uyear=sorted(podata['PO_year'].unique())
if request.method=='POST':
SelectedYear=int(request.form['name1'])
podata['date_diff']=podata['Orderreceiveddate']-podata['POdate']
        podata.loc[(podata.date_diff >= '15 days') & (podata.date_diff < '18 days'), 'date_diff_Group'] = '15-18'
        podata.loc[(podata.date_diff >= '18 days') & (podata.date_diff < '21 days'), 'date_diff_Group'] = '18-21'
        podata.loc[(podata.date_diff >= '21 days') & (podata.date_diff <= '23 days'), 'date_diff_Group'] = '21-23'
def topleadyear(year):
x=[]
y=[]
gg1=podata[(podata['PO_year']==year)].groupby(['PO_year','date_diff_Group'])['date_diff_Group'].size()
x=gg1.index.get_level_values(1)
y=gg1.values
df=pd.DataFrame({'Lead_Time':x,'Total Count':y})
return df
df=topleadyear(SelectedYear)
return render_template('purchaseordermanagement5.html',say=1,sayy=1,uyear=uyear,data=df.values,SelectedYear=SelectedYear)
return render_template('purchaseordermanagement5.html',say=1,uyear=uyear)
@app.route('/pomtype4',methods=['POST','GET'])
def pomtype4():
pocdata=podata.groupby('PO_year')['PO_year'].size()
year=pocdata.index.get_level_values(0)
count=pocdata.values.astype(int)
df=pd.DataFrame({'Year':year,'PO_Count':count})
return render_template('purchaseordermanagement4.html',data=df.values)
#Aggregate Planning
@app.route("/aggregate",methods = ['GET','POST'])
def aggregate():
if request.method== 'POST':
from_date=request.form['from']
to_date=request.form['to']
factory=request.form['typedf']
connection = pymysql.connect(host='localhost',
user='user',
password='',
db='test',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
x=connection.cursor()
x.execute("select * from `agggendata`")
connection.commit()
data=pd.DataFrame(x.fetchall())
fromdifftodata= data[(data['Month'] > from_date) & (data['Month'] < to_date )]
datas=fromdifftodata[fromdifftodata['Factory']==factory]
global forecastedplaniingdata
forecastedplaniingdata=pd.concat([datas['Month'],datas['Demand_Forecast']],axis=1)
        dataforecast=pd.concat([datas['Month'],datas['Factory'],datas['Demand_Forecast']],axis=1)
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.style_render import (
_get_level_lengths,
_get_trimming_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_df():
return DataFrame(
[[1, 2], [3, 4]],
index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
dtype=int,
)
@pytest.fixture
def mi_styler(mi_df):
    return Styler(mi_df, uuid_len=0)
import pathlib
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.optimize import linear_sum_assignment
from dicodile.config import DATA_HOME
from dicodile.utils.viz import display_dictionaries
OUTPUT_DIR = pathlib.Path('benchmarks_results')
DATA_DIR = DATA_HOME / 'images' / 'text'
# Matplotlib config
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
# Figure config
IM_NOISE_LEVEL = 3
def style_edge_axes(ax, style):
"""Color the border of an matplotlib Axes."""
for spine in ax.spines.values():
spine.set(**style)
def plot_dictionary(result_file='dicodile_text.py_PAMI_20-06-29_15h35.pkl',
res=None, s=1):
if res is None:
df = pd.read_pickle(OUTPUT_DIR / result_file)
tl_max = df['text_length'].max() # noqa: F841
res = df.query(
'noise_level== @IM_NOISE_LEVEL & text_length == @tl_max'
)
res = res.loc[res['score_cdl_2'].idxmax()]
# compute ordering for the dictionary
_, j = linear_sum_assignment(res['corr_cdl'], maximize=True)
_, j_init = linear_sum_assignment(res['corr_init'], maximize=True)
_, j_dl = linear_sum_assignment(res['corr_dl'], maximize=True)
# Define display elements
display_elements = {
'Pattern': {
'D': res['D'],
'style': dict(color='C3', linestyle='dotted', lw=3)
},
'Random Patches': {
'D': res['D_init'][j_init],
'style': dict(color='C2', linestyle='dotted', lw=3)
},
'DiCoDiLe': {
'D': res['D_cdl'][j],
'style': dict(color='C1', lw=3)
},
'Online DL': {
'D': res['D_dl'][j_dl],
'style': dict(color='C0', lw=3)
}
}
labels = list(display_elements.keys())
list_D = [e['D'] for e in display_elements.values()]
styles = [e['style'] for e in display_elements.values()]
# compute layout
n_dict = len(list_D)
D_0 = res['D']
n_atoms = D_0.shape[0]
n_cols = max(4, int(np.sqrt(n_atoms)))
n_rows = int(np.ceil(n_atoms / n_cols))
nr = n_rows * n_dict
fig = plt.figure(figsize=(6.4, 6.8))
gs = mpl.gridspec.GridSpec(
nrows=nr + 2, ncols=n_cols,
height_ratios=[.3, .1] + [.6 / nr] * nr
)
# display all the atoms
axes = np.array([[fig.add_subplot(gs[i + 2, j])
for j in range(n_cols)] for i in range(nr)])
display_dictionaries(*list_D, styles=styles, axes=axes)
# Add a legend
handles = [mpl.lines.Line2D([0], [0], **s) for s in styles]
ax_legend = fig.add_subplot(gs[1, :])
ax_legend.set_axis_off()
ax_legend.legend(handles, labels, loc='center', ncol=2,
bbox_to_anchor=(0, 0.5, 1, .05), fontsize=14)
# Display the original images
data = np.load(DATA_DIR / res['filename'])
im = data.get('X')[190:490, 250:750]
ax = fig.add_subplot(gs[0, :n_cols // 2])
ax.imshow(im, cmap='gray')
ax.set_axis_off()
ax = fig.add_subplot(gs[0, n_cols // 2:])
noise = IM_NOISE_LEVEL * im.std() * np.random.randn(*im.shape)
ax.imshow(im + noise, cmap='gray')
ax.set_axis_off()
# Adjust plot and save figure
plt.subplots_adjust(wspace=.1, top=.99, bottom=0.01)
fig.savefig(OUTPUT_DIR / 'dicodile_text_dict.pdf', dpi=300)
def plot_performances(result_file='dicodile_text.py_20-06-26_13h49.pkl',
noise_levels=[.1, IM_NOISE_LEVEL]):
    df = pd.read_pickle(OUTPUT_DIR / result_file)
from os.path import join, exists, dirname, basename
from os import makedirs
import sys
import pandas as pd
from glob import glob
import seaborn as sns
import numpy as np
from scipy import stats
import xlsxwriter
import matplotlib.pyplot as plt
from scripts.parse_samplesheet import get_min_coverage, get_role, add_aliassamples, get_species
from scripts.snupy import check_snupy_status
import json
import datetime
import getpass
import socket
import requests
from requests.auth import HTTPBasicAuth
import urllib3
import yaml
import pickle
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
plt.switch_backend('Agg')
RESULT_NOT_PRESENT = -5
def report_undertermined_filesizes(fp_filesizes, fp_output, fp_error,
zscorethreshold=1):
# read all data
fps_sizes = glob(join(dirname(fp_filesizes), '*.txt'))
pds_sizes = []
for fp_size in fps_sizes:
data = pd.read_csv(
fp_size, sep="\t", names=["filesize", "filename", "status"],
index_col=1)
# mark given read as isme=True while all other data in the dir
# are isme=False
data['isme'] = fp_filesizes in fp_size
data['filesize'] /= 1024**3
pds_sizes.append(data)
pd_sizes = pd.concat(pds_sizes)
# compute z-score against non-bad known runs
pd_sizes['z-score'] = np.nan
idx_nonbad = pd_sizes[pd_sizes['status'] != 'bad'].index
pd_sizes.loc[idx_nonbad, 'z-score'] = stats.zscore(
pd_sizes.loc[idx_nonbad, 'filesize'])
# plot figure
fig = plt.figure()
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] != 'bad')]['filesize'],
kde=False, rug=False, color="black", label='known runs')
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] == 'bad')]['filesize'],
kde=False, rug=False, color="red", label='bad runs')
ax = sns.distplot(
pd_sizes[pd_sizes['isme'] == np.True_]['filesize'],
kde=False, rug=True, color="green", label='this run')
_ = ax.set_ylabel('number of files')
_ = ax.set_xlabel('file-size in GB')
ax.set_title('run %s' % basename(fp_filesizes)[:-4])
ax.legend()
# raise error if current run contains surprisingly large undetermined
# filesize
if pd_sizes[(pd_sizes['isme'] == np.True_) &
(pd_sizes['status'] == 'unknown')]['z-score'].max() > zscorethreshold:
ax.set_title('ERROR: %s' % ax.get_title())
fig.savefig(fp_error, bbox_inches='tight')
        raise ValueError(
            ("Compared to known historic runs, your run contains surprisingly "
             "large (z-score > %f) file(s) of undetermined reads. You will find "
             "a supporting image at '%s'. Please do the following:\n"
             "1. discuss the quality of the run with the lab personnel.\n"
             "2. if you decide to keep going with this run, mark the file "
             "status (3rd column) in file '%s' as 'good'.\n"
             "3. for future automatic checks, mark the file status (3rd "
             "column) as 'bad' if you decided to abort processing due to "
             "too low quality (the z-score is computed against known, "
             "non-bad runs).") % (zscorethreshold, fp_error, fp_filesizes))
else:
fig.savefig(fp_output, bbox_inches='tight')
def report_exome_coverage(
fps_sample, fp_plot,
min_coverage=30, min_targets=80, coverage_cutoff=200):
"""Creates an exome coverage plot for multiple samples.
Parameters
----------
fps_sample : [str]
A list of file-paths with coverage data in csv format.
fp_plot : str
Filepath of output graph.
min_coverage : int
Default: 30.
        An arbitrary threshold of minimal coverage that we expect.
A vertical dashed line is drawn at this value.
min_targets : float
Default: 80.
An arbitraty threshold of minimal targets that we expect to be covered.
A horizontal dashed line is drawn at this value.
coverage_cutoff : float
Default: 200.
        Rightmost coverage cut-off value at which the X-axis is limited.
Raises
------
ValueError : If one of the sample's coverage falls below expected
thresholds.
"""
# Usually we aim for a 30X coverage on 80% of the sites.
fig, ax = plt.subplots()
ax.axhline(y=min_targets, xmin=0, xmax=coverage_cutoff, color='gray',
linestyle='--')
ax.axvline(x=min_coverage, ymin=0, ymax=100, color='gray', linestyle='--')
samples_below_coverage_threshold = []
for fp_sample in fps_sample:
coverage = pd.read_csv(fp_sample, sep="\t")
samplename = fp_sample.split('/')[-1].split('.')[0]
linewidth = 1
if coverage[coverage['#coverage'] == min_coverage]['percent_cumulative'].min() < min_targets:
linewidth = 4
samples_below_coverage_threshold.append(samplename)
ax.plot(coverage['#coverage'],
coverage['percent_cumulative'],
label=samplename,
linewidth=linewidth)
ax.set_xlim((0, coverage_cutoff))
ax.set_xlabel('Read Coverage')
ax.set_ylabel('Targeted Exome Bases')
ax.legend()
if len(samples_below_coverage_threshold) > 0:
fp_plot = fp_plot.replace('.pdf', '.error.pdf')
fig.savefig(fp_plot, bbox_inches='tight')
if len(samples_below_coverage_threshold) > 0:
raise ValueError(
"The following %i sample(s) have coverage below expected "
"thresholds. Please discuss with project PIs on how to proceed. "
"Maybe, samples need to be re-sequenced.\n\t%s\nYou will find more"
" information in the generated coverage plot '%s'." % (
len(samples_below_coverage_threshold),
'\n\t'.join(samples_below_coverage_threshold),
fp_plot))
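# Usage sketch for report_exome_coverage (hypothetical file paths):
#     report_exome_coverage(
#         ['coverage/sampleA.txt', 'coverage/sampleB.txt'],
#         'plots/exome_coverage.pdf', min_coverage=30, min_targets=80)
# Each ACTION_PROGRAMS entry maps an analysis action (background, tumornormal,
# trio) to the variant calling program and the file endings used to locate
# snupy extracts and spike call files.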
ACTION_PROGRAMS = [
{'action': 'background',
'program': 'GATK',
'fileending_snupy_extract': '.snp_indel.gatk',
'fileending_spike_calls': '.gatk.snp_indel.vcf',
'stepname_spike_calls': 'gatk_CombineVariants',
},
{'action': 'background',
'program': 'Platypus',
'fileending_snupy_extract': '.indel.ptp',
'fileending_spike_calls': '.ptp.annotated.filtered.indels.vcf',
'stepname_spike_calls': 'platypus_filtered',
},
{'action': 'tumornormal',
'program': 'Varscan',
'fileending_snupy_extract': '.somatic.varscan',
'fileending_spike_calls':
{'homo sapiens': '.snp.somatic_germline.vcf',
'mus musculus': '.indel_snp.vcf'},
'stepname_spike_calls': 'merge_somatic',
},
{'action': 'tumornormal',
'program': 'Mutect',
'fileending_snupy_extract': '.somatic.mutect',
'fileending_spike_calls': '.all_calls.vcf',
'stepname_spike_calls': 'mutect',
},
{'action': 'tumornormal',
'program': 'Excavator2',
'fileending_snupy_extract': '.somatic.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_somatic',
},
{'action': 'trio',
'program': 'Varscan\ndenovo',
'fileending_snupy_extract': '.denovo.varscan',
'fileending_spike_calls': '.var2denovo.vcf',
'stepname_spike_calls': 'writing_headers',
},
{'action': 'trio',
'program': 'Excavator2',
'fileending_snupy_extract': '.trio.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_trio',
},
]
def _get_statusdata_demultiplex(samplesheets, prefix, config):
demux_yields = []
for flowcell in samplesheets['run'].unique():
fp_yielddata = '%s%s%s/Data/%s.yield_data.csv' % (prefix, config['dirs']['intermediate'], config['stepnames']['yield_report'], flowcell)
if exists(fp_yielddata):
demux_yields.append(
                pd.read_csv(fp_yielddata, sep="\t"))
"""Unit tests for functions in src/util.py"""
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from src.util import (
StanInput,
make_columns_lower_case,
one_encode,
stanify_dict,
)
@pytest.mark.parametrize(
"s_in,expected",
[
(
pd.Series(["8", 1, "????"], index=["a", "b", "c"]),
pd.Series([1, 2, 3], index=["a", "b", "c"]),
),
(
pd.Series([1, "????", "????"], index=["a", "b", "c"]),
pd.Series([1, 2, 2], index=["a", "b", "c"]),
),
],
)
def test_one_encode(s_in: pd.Series, expected: pd.Series):
assert_series_equal(one_encode(s_in), expected)
@pytest.mark.parametrize(
"df_in,expected",
[
(
pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]}),
pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}),
),
(
pd.DataFrame(
[[1, 1]],
columns=pd.MultiIndex.from_product([["A"], ["B", "C"]]),
),
pd.DataFrame(
[[1, 1]],
columns=pd.MultiIndex.from_product([["a"], ["b", "c"]]),
),
),
],
)
def test_make_columns_lower_case(df_in: pd.DataFrame, expected: pd.DataFrame):
assert_frame_equal(make_columns_lower_case(df_in), expected)
@pytest.mark.parametrize(
"d_in,expected",
[
({"a": | pd.Series([1, 2, 3]) | pandas.Series |
import pandas as pd
import numpy as np
df_1 = pd.read_csv('./data_01.txt',sep="|", header=0, encoding='latin-1')
df_2 = pd.read_csv('./data_02.txt',sep="|", header=0, encoding='latin-1')
df_3 = pd.read_csv('./data_03.txt',sep="|", header=0, encoding='latin-1')
df_4 = pd.read_csv('./data_04.txt',sep="|", header=0, encoding='latin-1')
# pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
import pandas.tseries.offsets as offsets
import pandas as pd
import unittest
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
class TestResample(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assert_((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assert_((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assert_((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M', how='mean')
expected = ts.groupby(lambda x: x.month).mean()
self.assertEquals(len(result), 2)
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2)
expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', how=lambda x: x.mean(1), axis=1)
expected = panel.resample('M', how='mean', axis=1)
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', how=lambda x: x.mean(2), axis=2)
expected = panel.resample('M', how='mean', axis=2)
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the middle
# of a desired interval
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left')
expected = ts.resample(freq, closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2)
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
self.assert_(resampled.index.equals(exp_rng))
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left')
expected = ts.resample('D', closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period')
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
self.assert_(result.index.equals(exp_index))
def test_resample_empty(self):
ts = _simple_ts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
result = ts.resample('A', kind='period')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
xp = DataFrame()
rs = xp.resample('A')
assert_frame_equal(xp, rs)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W')
expected = ts.resample('W-SUN')
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
result = ts.resample('M')
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M')
expected = df.resample('M', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left')
exp = df.tshift(1, freq='D').resample('M', kind='period')
exp = exp.to_timestamp(how='end')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q')
expected = df.resample('Q', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left')
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left')
expected = expected.to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
resampled = ts.resample('M')
self.assert_(len(resampled) == 1)
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
result = ts.resample(freq, how='mean')
def test_corner_cases(self):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left')
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
self.assert_(result.index.equals(ex_index))
len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC')
self.assert_(len(result) == 0)
# resample to periods
ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period')
self.assert_(len(result) == 1)
self.assert_(result.index[0] == Period('2000-04', freq='M'))
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d')
def test_upsample_apply_functions(self):
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min', how=['mean', 'sum'])
tm.assert_isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D', how='sum')
exp = ts.sort_index().resample('D', how='sum')
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
for dtype in ['int64','int32','float64','float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype = dtype)
result = df.resample("T", how=lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T", how="median")
exp = df.asfreq('T')
            tm.assert_frame_equal(result, exp)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('<KEY>')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
        assert_frame_equal(result, expected)
#! -*- coding: utf-8 -*-
#%%
from __future__ import print_function, division
from keras import backend as K
from keras.models import Model
from keras.engine.topology import Layer
from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten, add, MaxPooling1D, Input, UpSampling1D, BatchNormalization, GaussianNoise, Activation
from keras.regularizers import l2
import numpy as np
class Position_Embedding(Layer):
def __init__(self, size=None, mode='sum', **kwargs):
self.size = size #必须为偶数
self.mode = mode
super(Position_Embedding, self).__init__(**kwargs)
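    # Sinusoidal position encoding in the style of "Attention Is All You Need":
    # position i is mapped to [cos(i / 10000**(2j/size)), sin(i / 10000**(2j/size))]
    # over j, and either summed with ('sum') or concatenated to ('concat') the input.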
def call(self, x):
if (self.size == None) or (self.mode == 'sum'):
self.size = int(x.shape[-1])
batch_size,seq_len = K.shape(x)[0],K.shape(x)[1]
position_j = 1. / K.pow(10000., \
2 * K.arange(self.size / 2, dtype='float32' \
) / self.size)
position_j = K.expand_dims(position_j, 0)
        position_i = K.cumsum(K.ones_like(x[:,:,0]), 1)-1  # K.arange does not support variable length, so build the positions this way
position_i = K.expand_dims(position_i, 2)
position_ij = K.dot(position_i, position_j)
position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
if self.mode == 'sum':
return position_ij + x
elif self.mode == 'concat':
return K.concatenate([position_ij, x], 2)
def compute_output_shape(self, input_shape):
if self.mode == 'sum':
return input_shape
elif self.mode == 'concat':
return (input_shape[0], input_shape[1], input_shape[2]+self.size)
class Attention(Layer):
def __init__(self, nb_head, size_per_head, **kwargs):
self.nb_head = nb_head
self.size_per_head = size_per_head
self.output_dim = nb_head*size_per_head
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
self.WQ = self.add_weight(name='WQ',
shape=(input_shape[0][-1], self.output_dim),
initializer='glorot_uniform',
trainable=True)
self.WK = self.add_weight(name='WK',
shape=(input_shape[1][-1], self.output_dim),
initializer='glorot_uniform',
trainable=True)
self.WV = self.add_weight(name='WV',
shape=(input_shape[2][-1], self.output_dim),
initializer='glorot_uniform',
trainable=True)
super(Attention, self).build(input_shape)
def Mask(self, inputs, seq_len, mode='mul'):
if seq_len == None:
return inputs
else:
mask = K.one_hot(seq_len[:,0], K.shape(inputs)[1])
mask = 1 - K.cumsum(mask, 1)
for _ in range(len(inputs.shape)-2):
mask = K.expand_dims(mask, 2)
if mode == 'mul':
return inputs * mask
if mode == 'add':
return inputs - (1 - mask) * 1e12
def call(self, x):
        # If only Q_seq, K_seq, V_seq are passed, no mask is applied.
        # If Q_seq, K_seq, V_seq, Q_len, V_len are all passed, the padded positions are masked.
if len(x) == 3:
Q_seq,K_seq,V_seq = x
Q_len,V_len = None,None
elif len(x) == 5:
Q_seq,K_seq,V_seq,Q_len,V_len = x
        # Apply the linear projections to Q, K and V
Q_seq = K.dot(Q_seq, self.WQ)
Q_seq = K.reshape(Q_seq, (-1, K.shape(Q_seq)[1], self.nb_head, self.size_per_head))
Q_seq = K.permute_dimensions(Q_seq, (0,2,1,3))
K_seq = K.dot(K_seq, self.WK)
K_seq = K.reshape(K_seq, (-1, K.shape(K_seq)[1], self.nb_head, self.size_per_head))
K_seq = K.permute_dimensions(K_seq, (0,2,1,3))
V_seq = K.dot(V_seq, self.WV)
V_seq = K.reshape(V_seq, (-1, K.shape(V_seq)[1], self.nb_head, self.size_per_head))
V_seq = K.permute_dimensions(V_seq, (0,2,1,3))
        # compute the scaled dot products, then mask, then softmax
A = K.batch_dot(Q_seq, K_seq, axes=[3,3]) / self.size_per_head**0.5
A = K.permute_dimensions(A, (0,3,2,1))
A = self.Mask(A, V_len, 'add')
A = K.permute_dimensions(A, (0,3,2,1))
A = K.softmax(A)
        # compute the output sequence and mask it
O_seq = K.batch_dot(A, V_seq, axes=[3,2])
O_seq = K.permute_dimensions(O_seq, (0,2,1,3))
O_seq = K.reshape(O_seq, (-1, K.shape(O_seq)[1], self.output_dim))
O_seq = self.Mask(O_seq, Q_len, 'mul')
return O_seq
def compute_output_shape(self, input_shape):
return (input_shape[0][0], input_shape[0][1], self.output_dim)
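# Hedged usage sketch (added for illustration; not part of the original script). It
# shows how the Position_Embedding and Attention layers above could be wired into a
# small Keras model. The sequence length, channel count and head sizes below are
# arbitrary assumptions chosen only so that the shapes line up.
def _demo_self_attention_model(seq_len=1800, channels=1):
    inp = Input(shape=(seq_len, channels))
    # project to an even embedding size so Position_Embedding(mode='sum') can match it
    x = Conv1D(filters=64, kernel_size=1, padding='same')(inp)
    x = Position_Embedding(mode='sum')(x)
    # 8 heads * 8 dims per head = 64, matching the embedding size for the residual add
    a = Attention(nb_head=8, size_per_head=8)([x, x, x])
    x = add([x, a])
    out = Conv1D(filters=1, kernel_size=1, activation='sigmoid', padding='same')(x)
    return Model(inp, out)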
def resnet_layer(inputs,
filters=16,
kernel_size=3,
strides=1,
batch_normalization=True,
conv_first=True,
dilation_rate=7,
std = 1e-4):
conv = Conv1D(filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4),
dilation_rate = dilation_rate)
x = inputs
x = conv(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = GaussianNoise(stddev = std)(x)
return x
def coder(input,
filters,
kernel_size = 3,
down_sample = True,
size = 2):
input = resnet_layer(input,filters=filters,kernel_size=kernel_size)
x = resnet_layer(input,filters=filters,kernel_size=kernel_size)
input = add([input, x])
x = resnet_layer(input,filters=filters,kernel_size=kernel_size)
x = add([input, x])
if(down_sample):
x = MaxPooling1D(pool_size=size)(x)
else:
x = UpSampling1D(size=size)(x)
return x
def begin(input, std=1e-5):
x = Conv1D(filters=64, kernel_size=1, activation='sigmoid',padding='same')(input)
x = BatchNormalization()(x)
x = GaussianNoise(std)(x)
return x
def end(input):
x = Conv1D(filters=1, kernel_size=3, activation='sigmoid', dilation_rate=7,padding='same')(input)
x = BatchNormalization()(x)
return x
def midden(input):
x = Attention(8,32)([input,input,input])
x = add([x,input])
return x
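# Hedged sketch (illustration only) of how begin/coder/midden/end above might be
# composed into an encoder-decoder with a self-attention bottleneck. The filter
# count of 256 is an assumption picked so that midden's residual add matches the
# 8 * 32 = 256 attention width; the real training code below may wire this differently.
def _demo_adae_network(seq_len=1800):
    inp = Input(shape=(seq_len, 1))
    x = begin(inp)                                        # 64-filter stem
    x = coder(x, filters=256, down_sample=True, size=2)   # encoder, halves the length
    x = midden(x)                                         # multi-head attention bottleneck
    x = coder(x, filters=256, down_sample=False, size=2)  # decoder, restores the length
    out = end(x)
    return Model(inp, out)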
def lr_schedule(epoch):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
"""
lr = 1e-5
if epoch > 80:
lr *= 0.5e-3
elif epoch > 60:
lr *= 1e-3
elif epoch > 40:
lr *= 1e-2
elif epoch > 20:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
def generate_slide_window(arr, step=1, n=1800):
i = 0
result = []
while(i < (len(arr)-n+1)):
result.append(arr[i:i+n])
i+=step
return np.array(result)
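# Hedged example (illustration only): generate_slide_window turns a 1-D reading into
# overlapping windows of length n; with step < n consecutive windows share samples.
# The synthetic array and step size below are made up.
def _demo_slide_window():
    arr = np.arange(2200)
    windows = generate_slide_window(arr, step=200, n=1800)
    # windows start at indices 0, 200 and 400 -> shape (3, 1800)
    return windows.shape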
# %%
from warnings import warn
from nilmtk.disaggregate import Disaggregator
from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten, add, MaxPooling1D, Input
import keras
import pandas as pd
import numpy as np
from collections import OrderedDict
from keras.optimizers import SGD
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
import keras.backend as K
from statistics import mean
import os
import pickle
import random
import json
from .util import *
random.seed(10)
np.random.seed(10)
class ADAE(Disaggregator):
def __init__(self, params):
"""
        Initialize the model with the given parameters
"""
self.MODEL_NAME = "ADAE"
self.chunk_wise_training = params.get('chunk_wise_training',False)
self.sequence_length = params.get('sequence_length',1800)
self.stride_length = params.get('stride_length',self.sequence_length)
self.n_epochs = params.get('n_epochs', 10)
self.batch_size = params.get('batch_size',16)
self.mains_mean = params.get('mains_mean',1000)
self.mains_std = params.get('mains_std',600)
self.appliance_params = params.get('appliance_params',{})
self.save_model_path = params.get('save-model-path', None)
self.load_model_path = params.get('pretrained-model-path',None)
self.models = OrderedDict()
if self.load_model_path:
self.load_model()
def partial_fit(self, train_main, train_appliances, do_preprocessing=True,**load_kwargs):
"""
The partial fit function
"""
# If no appliance wise parameters are specified, then they are computed from the data
if len(self.appliance_params) == 0:
self.set_appliance_params(train_appliances)
        # To preprocess the data and bring it to a valid shape
if do_preprocessing:
print ("Doing Preprocessing")
train_main,train_appliances = self.call_preprocessing(train_main,train_appliances,'train')
train_main = pd.concat(train_main,axis=0).values
train_main = train_main.reshape((-1,self.sequence_length,1))
new_train_appliances = []
for app_name, app_df in train_appliances:
app_df = pd.concat(app_df,axis=0).values
app_df = app_df.reshape((-1,self.sequence_length,1))
new_train_appliances.append((app_name, app_df))
train_appliances = new_train_appliances
for appliance_name, power in train_appliances:
if appliance_name not in self.models:
print ("First model training for ",appliance_name)
self.models[appliance_name] = self.return_network()
print (self.models[appliance_name].summary())
print ("Started Retraining model for ",appliance_name)
model = self.models[appliance_name]
filepath = 'adae-temp-weights-'+str(random.randint(0,100000))+'.h5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
train_x,v_x,train_y,v_y = train_test_split(train_main,power,test_size=.15,random_state=10)
def data_generator(data, targets, batch_size):
batches = (len(data) + batch_size - 1)//batch_size
while(True):
for i in range(batches):
X = data[i*batch_size : (i+1)*batch_size]
Y = targets[i*batch_size : (i+1)*batch_size]
yield (X, Y)
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=3,
min_lr=0.5e-6)
model.fit_generator(generator = data_generator(train_x, train_y, self.batch_size),
steps_per_epoch = (len(train_x) + self.batch_size - 1) // self.batch_size,
epochs = self.n_epochs,
verbose = 1,
callbacks = [checkpoint,lr_scheduler,lr_reducer],
validation_data = (v_x, v_y)
)
# model.fit(train_x,train_y,validation_data = [v_x,v_y],epochs = self.n_epochs, callbacks = [checkpoint,lr_scheduler,lr_reducer],shuffle=True,batch_size=self.batch_size)
model.load_weights(filepath)
if self.save_model_path:
self.save_model()
def load_model(self):
print ("Loading the model using the pretrained-weights")
model_folder = self.load_model_path
with open(os.path.join(model_folder, "model.json"), "r") as f:
model_string = f.read().strip()
params_to_load = json.loads(model_string)
self.sequence_length = int(params_to_load['sequence_length'])
self.mains_mean = params_to_load['mains_mean']
self.mains_std = params_to_load['mains_std']
self.appliance_params = params_to_load['appliance_params']
for appliance_name in self.appliance_params:
self.models[appliance_name] = self.return_network()
self.models[appliance_name].load_weights(os.path.join(model_folder,appliance_name+".h5"))
def save_model(self):
if os.path.exists(self.save_model_path) == False:
os.makedirs(self.save_model_path)
params_to_save = {}
params_to_save['appliance_params'] = self.appliance_params
params_to_save['sequence_length'] = self.sequence_length
params_to_save['mains_mean'] = self.mains_mean
params_to_save['mains_std'] = self.mains_std
for appliance_name in self.models:
print ("Saving model for ", appliance_name)
self.models[appliance_name].save_weights(os.path.join(self.save_model_path,appliance_name+".h5"))
with open(os.path.join(self.save_model_path,'model.json'),'w') as file:
file.write(json.dumps(params_to_save, cls=NumpyEncoder))
def disaggregate_chunk(self, test_main_list, do_preprocessing=True):
if do_preprocessing:
test_main_list = self.call_preprocessing(test_main_list,submeters_lst=None,method='test')
test_predictions = []
for test_main in test_main_list:
test_main = test_main.values
test_main = test_main.reshape((-1,self.sequence_length,1))
disggregation_dict = {}
for appliance in self.models:
prediction = self.models[appliance].predict(test_main,batch_size=self.batch_size)
app_mean = self.appliance_params[appliance]['mean']
app_std = self.appliance_params[appliance]['std']
prediction = self.denormalize_output(prediction,app_mean,app_std)
valid_predictions = prediction.flatten()
valid_predictions = np.where(valid_predictions>0,valid_predictions,0)
                series = pd.Series(valid_predictions)
import sqlite3
import time
import public_function as pb_fnc
import pandas as pd
import numpy as np
class InfoCheck:
bu_name = ""
db_path = "../data/_DB/"
def __init__(self, bu):
self.__class__.bu_name = bu
# get all master data of single code
def get_single_code_all_master_data(self, material_code, master_data_item_list):
str_master_data = ""
for item in master_data_item_list:
str_master_data += item + ","
str_master_data = str_master_data.rstrip(",")
database_fullname = self.__class__.db_path + self.__class__.bu_name + "_Master_Data.db"
datasheet_name = self.__class__.bu_name + "_Master_Data"
conn = sqlite3.connect(database_fullname)
c = conn.cursor()
sql_cmd = 'SELECT %s FROM %s WHERE Material=\"%s\"' % (str_master_data, datasheet_name, material_code)
c.execute(sql_cmd)
result = c.fetchall()
if result:
return result[0]
else:
return 0
    # read all of the master data for a single material code
def get_master_data(self, code):
        # full path plus name of the database file
db_fullname = self.__class__.db_path + "Master_Data.db"
        # table name, same as the file name
table_name = "MATERIAL_MASTER"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
str_cmd = "SELECT Description, Chinese_Description, Hierarchy_4, Hierarchy_5, Sales_Status, Purchase_Status, " \
"Standard_Cost FROM " + table_name + " WHERE Material = \'" + code + "\' "
c.execute(str_cmd)
row = c.fetchall()
list_title = ["Description", "Chinese_Description", "Hierarchy_4", "Hierarchy_5", "Sales_Status",
"Purchase_Status", "Standard_Cost"]
return [list_title, list(row[0])]
    # read the full master data list
def get_master_data_list(self):
        # file name, without extension
file_name = self.__class__.bu_name + "_Master_Data"
        # full path plus name of the database file
db_fullname = self.__class__.db_path + file_name + ".db"
        # table name, same as the file name
tbl_name = file_name
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
result = c.execute("SELECT * from " + tbl_name)
row = result.fetchall()
conn.close()
return list(row)
# get master data for code list, phase out
# def get_master_data_for_list(self, code_list, master_data_name):
# file_name = self.__class__.bu_name + "_Master_Data" if master_data_name == "SAP_Price" else "Master_Data"
# db_fullname = self.__class__.db_path + file_name + ".db"
# table_name = self.__class__.bu_name + "_SAP_Price" if master_data_name == "SAP_Price" else "MATERIAL_MASTER"
# master_data_result = []
# conn = sqlite3.connect(db_fullname)
# c = conn.cursor()
# for code_name in code_list:
# if master_data_name == "SAP_Price":
# sql_cmd = "SELECT Price FROM " + table_name + " WHERE Material = \'" + code_name + "\'"
# else:
# sql_cmd = "SELECT " + master_data_name + " FROM " + table_name + " WHERE Material = \'" + code_name + "\'"
# c.execute(sql_cmd)
# master_data_output = c.fetchall()
# if master_data_output:
# master_data_result.append(master_data_output[0][0])
# else:
# master_data_result.append(0)
# return master_data_result
# get single column from bu master data
def get_bu_master_data(self, code, column_name):
file_name = self.__class__.bu_name + "_Master_Data"
db_fullname = self.__class__.db_path + file_name + ".db"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
sql_cmd = 'SELECT %s FROM %s WHERE Material = \"%s\"' % (column_name, file_name, code)
c.execute(sql_cmd)
md_result = c.fetchall()
if md_result:
return md_result[0][0]
else:
return ""
    # sales data by Hierarchy_5 (H5)
def get_h5_sales_data(self, data_type, price_type, hierarchy, month_number):
        # file name, without extension
tbl_name = self.__class__.bu_name + "_" + data_type
        # full path plus name of the database file
db_fullname = self.__class__.db_path + tbl_name + ".db"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
        # build the SQL command
if price_type == "Standard_Cost":
if hierarchy == "ALL":
str_cmd = "SELECT month, sum(Value_Standard_Cost) from " + tbl_name + " GROUP BY month ORDER BY month"
else:
str_cmd = "SELECT month, sum(Value_Standard_Cost) from " + tbl_name + " WHERE Hierarchy_5 = '" + \
hierarchy + "\' COLLATE NOCASE GROUP BY month ORDER BY month"
else:
if hierarchy == "ALL":
str_cmd = "SELECT month, sum(Value_SAP_Price) from " + tbl_name + " GROUP BY month ORDER BY month"
else:
str_cmd = "SELECT month, sum(Value_SAP_Price) from " + tbl_name + " WHERE Hierarchy_5 = \'" + \
hierarchy + "\' COLLATE NOCASE GROUP BY month ORDER BY month"
c.execute(str_cmd)
sales_result = self.data_mapping(c.fetchall(), pb_fnc.get_current_month(), 0 - month_number)
return sales_result
def get_h5_inventory_data(self, inv_type, price_type, h5_name, month_number):
        # file name, without extension
file_name = self.__class__.bu_name + "_" + inv_type + "_INV"
        # full path plus name of the database file
db_fullname = self.__class__.db_path + file_name + ".db"
        # table name, same as the file name
tbl_name = file_name
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
if h5_name == "ALL":
str_cmd = "SELECT month, SUM(Value_%s) from %s GROUP BY month" % (price_type, tbl_name)
else:
str_cmd = "SELECT month, SUM(Value_%s) from %s WHERE Hierarchy_5 = \"%s\" COLLATE NOCASE " \
"GROUP BY month " % (price_type, tbl_name, h5_name)
c.execute(str_cmd)
h5_inv_result = self.data_mapping(c.fetchall(), pb_fnc.get_current_month(), 0 - month_number)
return h5_inv_result
# get sap_price by code
def get_code_sap_price(self, code_name):
db_fullname = self.__class__.db_path + self.__class__.bu_name + "_Master_Data.db"
table_name = self.__class__.bu_name + "_SAP_Price"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
c.execute("SELECT Price FROM " + table_name + " WHERE Material = \'" + code_name + "\'")
sap_price_result = c.fetchall()
if not sap_price_result:
return 0
else:
return sap_price_result[0][0]
# get gtin by code
def get_code_gtin(self, code_name):
db_fullname = self.__class__.db_path + "Master_Data.db"
filename = "GTIN"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
c.execute("SELECT Barcode from " + filename + " WHERE [Material code] = \'" + code_name + "\'")
return c.fetchall()[0][0]
# get RAG by code
def get_code_rag(self, code_name):
db_fullname = self.__class__.db_path + "Master_Data.db"
filename = "RAG_Report"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
c.execute("SELECT REGLICNO, REGAPDATE, REGEXDATE from " + filename + " WHERE MATNR = \'" + code_name +
"\' ORDER by REGAPDATE")
return c.fetchall()
# get Phoenix Project Status by code
def get_code_phoenix_result(self, material_code):
db_fullname = self.__class__.db_path + self.__class__.bu_name + "_Master_Data.db"
filename = "TU_Phoenix_List"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
sql_cmd = "SELECT Month, [Target SKU] FROM " + filename + " WHERE [Exit SKU] = \'" + material_code + "\'"
c.execute(sql_cmd)
phoenix_result = c.fetchall()
phoenix_title = ["Phoenix Status", "Stop Manufacturing Date", "Target SKU"]
if len(phoenix_result) == 0:
return [["Phoenix Status"], ["N"]]
else:
return [phoenix_title, ["Y"] + list(phoenix_result[0])]
pass
    # sales data by material code
def get_code_sales(self, data_type, code, month_number):
        # file name, without extension
tbl_name = self.__class__.bu_name + "_" + data_type
        # full path plus name of the database file
db_fullname = self.__class__.db_path + tbl_name + ".db"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
str_cmd = "SELECT month, SUM(quantity) from " + tbl_name + " WHERE material = \'" + code \
+ "\' GROUP BY month ORDER BY month"
c.execute(str_cmd)
sales_result = self.data_mapping(c.fetchall(), pb_fnc.get_current_month(), 0 - month_number)
conn.close()
return sales_result
# get inventory data by code
# inventory_type: "JNJ", "LP", month_number: positive integer
def get_code_inventory(self, material_code, inventory_type, month_number):
tbl_name = self.__class__.bu_name + "_" + inventory_type + "_INV"
db_fullname = self.__class__.db_path + tbl_name + ".db"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
stock_column_name = 'Available_Stock' if inventory_type == 'JNJ' else 'quantity'
sql_cmd = 'SELECT month, SUM(%s) FROM %s WHERE Material = \"%s\" GROUP BY month ' \
'ORDER BY month' % (stock_column_name, tbl_name, material_code)
c.execute(sql_cmd)
inventory_result = self.data_mapping(c.fetchall(), pb_fnc.get_current_month(), 0 - month_number)
return inventory_result
# calculate inventory month
@staticmethod
def get_inventory_month(lst_inv, lst_sales, month_number, blank_type=0):
# set blank value as None if blank_type is 1, else set zero
lst_inv_month = []
# leave previous 6 month in blank
for i in range(0, 6):
lst_inv_month.append(0) if blank_type == 0 else lst_inv_month.append(None)
for i in range(0, month_number-6):
if sum(lst_sales[i: i+6]) == 0:
lst_inv_month.append(0) if blank_type == 0 else lst_inv_month.append(None)
else:
lst_inv_month.append(round(lst_inv[i+6] / (sum(lst_sales[i: i+6])/6), 1))
return lst_inv_month
# Generate month list.
# The previous month list does not include current month.
# The future month list include current month.
@staticmethod
def get_time_list(start_point, parameter):
# Get month list in format YYYY-MM (start_point)
# parameter, the month list we need to generate
start_year, start_month = int(start_point[0:4]), int(start_point[-2:])
month_list = []
lower_limit = parameter if parameter <= 0 else 0
upper_limit = parameter if parameter > 0 else 0
for i in range(lower_limit, upper_limit):
t = (start_year, start_month + i, 14, 3, 6, 3, 6, 0, 0)
month_list.append(time.strftime("%Y-%m", time.localtime(time.mktime(t))))
return month_list
    # map the data onto the specified month list
def data_mapping(self, data, start_month, months):
month_list = self.get_time_list(start_month, months)
result_value = []
for item_month in month_list:
value = 0
for item_value in data:
if item_value[0] == item_month:
value = item_value[1]
result_value.append(value)
return result_value
# get forecast of single code, set fcst_type as Statistical or Final
def get_code_forecast(self, code_name, fcst_type, month_quantity):
db_fullname = self.__class__.db_path + self.__class__.bu_name + "_" + fcst_type + "_Forecast.db"
# get month list and generate blank dataframe
current_month = time.strftime("%Y-%m", time.localtime())
month_list = self.get_time_list(current_month, month_quantity)
df_forecast_final = pd.DataFrame(index=month_list)
# connect to forecast database
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
# get the newest table
c.execute("SELECT name from sqlite_master where type = \"table\" ORDER by name DESC")
tbl_name = c.fetchone()[0]
# get pivot table of forecast
sql_cmd = 'SELECT Month, Quantity FROM %s WHERE Material =\"%s\"' % (tbl_name, code_name)
df_forecast = pd.read_sql(con=conn, sql=sql_cmd, index_col='Month')
# join the two dataframe to mapping
df_forecast_final = df_forecast_final.join(df_forecast)
df_forecast_final.fillna(0, inplace=True)
output = [item[0] for item in df_forecast_final.values.tolist()]
return [month_list, output]
# get forecast of one hierarchy with pandas, set forecast_type as Statistical or Final
def get_h5_forecast(self, h5_name, forecast_type, month_quantity):
# Get future month list
current_month = time.strftime("%Y-%m", time.localtime())
month_list = self.get_time_list(current_month, month_quantity)
        df_forecast_result = pd.DataFrame(index=month_list, data=None)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 13 21:46:26 2021
@author: hk_nien @ Twitter
"""
from pathlib import Path
from time import time
import re
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tools import set_xaxis_dateformat, set_yaxis_log_minor_labels
import ggd_data
# Clone of the github.com/mzelst/covid-19 repo.
test_path = '../mzelst-covid19-nobackup/data-rivm/tests'
_CACHE = {} # store slow data here: (date_a, date_b) -> (tm, dataframe)
#%%
def load_testdata(min_date='2021-01-01', max_date='2099-01-01'):
"""Return DataFrame. Specify mindate as 'yyyy-mm-dd'.
Use cache if available.
Pull data from mzelst-covid19 repository (which should be up to date).
Return DataFrame with:
- index: multi-index (sdate, fdate).
- n_tested: number of tests (sum over regions)
    - n_pos: number positive
"""
if (min_date, max_date) in _CACHE:
tm, df = _CACHE[(min_date, max_date)]
if time() - tm < 3600:
print('Loaded from cache.')
return df.copy()
    # Load dataframes for all requested dates.
min_date, max_date = [pd.to_datetime(x) for x in [min_date, max_date]]
fdate = min_date
dfs = []
while fdate <= max_date:
fdate_str = fdate.strftime('%Y-%m-%d')
try:
df = ggd_data.load_ggd_pos_tests(fdate_str, quiet=True)
except FileNotFoundError:
break
df.reset_index(inplace=True)
df.rename(columns={'Date_tested': 'sdate'}, inplace=True)
df['fdate'] = fdate
dfs.append(df)
fdate += pd.Timedelta('1 d')
if len(dfs) == 0:
raise ValueError(f'No data loaded for range {min_date} .. {max_date}.')
print(f'Loaded {len(dfs)} GGD test files, last one '
f'{dfs[-1].iloc[0]["fdate"].strftime("%Y-%m-%d")}')
df = pd.concat(dfs)
df = df[['sdate', 'fdate', 'n_tested', 'n_pos']]
df = df.sort_values(['sdate', 'fdate'])
df = df.loc[df['sdate'] >= min_date - pd.Timedelta(2, 'd')]
_CACHE[(min_date, max_date)] = (time(), df.copy())
return df
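# Hedged usage sketch (not part of the original script): load_testdata returns one row
# per (sdate, fdate) pair, so reporting completeness for a single sampling date can be
# followed across successive file dates. The date range below is an arbitrary example
# and assumes the mzelst-covid19 checkout contains those files.
def _example_growth_for_one_sdate(sdate='2021-07-05'):
    df = load_testdata('2021-07-01', '2021-07-15')
    one_day = df.loc[df['sdate'] == pd.to_datetime(sdate)].set_index('fdate')
    return one_day[['n_tested', 'n_pos']]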
def plot_jump_10Aug():
"""Plot jump in data due to changes in counting around 10 Aug 2020."""
df = load_testdata('2021-06-01', '2021-09-01')
test_snapshots = {}
for fdate in ['2021-08-09', '2021-08-11']:
test_snapshots[fdate] = df.loc[df['fdate'] == fdate].set_index('sdate')
fig, ax = plt.subplots(figsize=(8, 4), tight_layout=True)
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b']
# n = len(test_snapshots)
for i, (fdate, df1) in enumerate(test_snapshots.items()):
msize = (1.5-i)*40
ax.scatter(df1.index, df1['n_tested'], label=f'Tests GGD, gepub. {fdate}',
color=colors[i], marker='o^'[i], s=msize)
ax.scatter(df1.index, df1['n_pos'], label=f'Posi. GGD, gepub. {fdate}',
color=colors[i], marker='+x'[i], s=msize*1.5)
oneday = pd.Timedelta('1d')
ax.set_xlim(df['sdate'].min()-oneday, df['sdate'].max()+oneday)
ax.grid()
ax.set_title('Aantal GGD tests per dag op twee publicatiedatums.')
set_xaxis_dateformat(ax, xlabel='Datum monstername')
ax.legend()
fig.show()
def plot_daily_tests_and_delays(date_min, date_max='2099-01-01', src_col='n_tested'):
"""Note: date_min, date_max refer to file dates. Test dates are generally
up to two days earlier. Repository 'mzelst-covid19' should be up to date.
Optionally set src_col='n_pos'
"""
# Convert to index 'sdate' and columns 2, 3, 4, ... with n_tested
# at 2, 3, ... days after sampling date.
df = load_testdata(date_min, date_max)
n_days_wait = 5
for iw in range(2, n_days_wait+1):
df1 = df.loc[df['sdate'] == df['fdate'] - pd.Timedelta(iw, 'd')]
df[iw] = df1[src_col]
df = df.groupby('sdate').max()
# Manually tweak around 2021-08-10 when counting changed.
for iw in range(3, n_days_wait+1):
sdate = pd.to_datetime('2021-08-10') - pd.Timedelta(iw, 'd')
for j in range(iw, n_days_wait+1):
df.at[sdate, j] = np.nan
fig, ax = plt.subplots(figsize=(9, 5), tight_layout=True)
barwidth = pd.Timedelta(1, 'd')
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf',
'#dddddd', '#bbbbbb', '#999999', '#777777',
'#555555', '#333333', '#111111'
] * 2
for iw, color in zip(range(2, n_days_wait+1), colors):
xs = df.index + pd.Timedelta(0.5, 'd')
ytops = df[iw].values
ybots = df[iw].values*0 if iw == 2 else df[iw-1].values
edge_args = dict(
edgecolor=('#444488' if len(xs) < 150 else 'none'),
linewidth=min(1.5, 100/len(xs)-0.2),
)
ax.bar(xs, ytops-ybots, bottom=ybots, color=color,
width=barwidth,
label=(f'Na {iw} dagen' if iw < 10 else None),
**edge_args
)
if iw == 2:
xmask = df.index.dayofweek.isin([5,6])
color = '#1b629a'
ax.bar(
xs[xmask], (ytops-ybots)[xmask], bottom=ybots[xmask], color=color,
width=barwidth, **edge_args
)
if date_min < '2021-08-07' < date_max:
ax.text(pd.to_datetime('2021-08-07T13:00'), 20500,
'Telmethode gewijzigd!',
rotation=90, horizontalalignment='center', fontsize=8
)
ytops = df.loc[:, range(2, n_days_wait+1)].max(axis=1, skipna=True) # Series
missing_day2 = (ytops - df[2])/ytops # Series
missing_thresh = 0.005
row_mask = (missing_day2 >= missing_thresh)
for tm, y in ytops.loc[row_mask].items():
if tm < pd.to_datetime(date_min):
continue
percent_late = np.around(missing_day2.loc[tm]*100, 1)
ax.text(
            tm + pd.Timedelta(0.55, 'd')
import os
import glob
import pandas as pd
import streamlit as st
@st.cache
def get_local_feather_files():
list_of_files = glob.glob('*.feather')
files_with_size = [(file_path, os.stat(file_path).st_size) for file_path in list_of_files]
df = pd.DataFrame(files_with_size)
df.columns = ['File Name', 'File Size in KBytes']
df['File Size in KBytes'] = (df['File Size in KBytes'] / 1024).astype(int)
return df
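# Hedged sketch (not part of the original app): the cached helper above would typically
# be rendered on the Streamlit page, e.g. as a small file-overview table.
def render_file_overview():
    st.subheader("Local feather files")
    st.dataframe(get_local_feather_files())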
@st.cache
def load_data():
    data = pd.read_feather('crashes.feather')
    return data
'''
dedup.py - Deduplicate reads that are coded with a UMI
=========================================================
:Author: <NAME>, <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python UMI
Purpose
-------
The purpose of this command is to deduplicate BAM files based
on the first mapping co-ordinate and the UMI attached to the read.
Selecting the representative read
---------------------------------
The following criteria are applied to select the read that will be retained
from a group of duplicated reads:
1. The read with the lowest number of mapping coordinates (see
--multimapping-detection-method option)
2. The read with the highest mapping quality
Otherwise a read is chosen at random.
dedup-specific options
----------------------
--output-stats (string, filename_prefix)
Output edit distance statistics and UMI usage statistics
using this prefix.
Output files are:
"[prefix]_stats_per_umi_per_position.tsv"
Histogram of counts per position per UMI pre- and post-deduplication
"[prefix]_stats_per_umi_per.tsv"
Table of stats per umi. Number of times UMI was observed,
total counts and median counts, pre- and post-deduplication
"[prefix]_stats_edit_distance.tsv"
Edit distance between UMIs at each position. Positions with a
        single UMI are reported separately. Pre- and post-deduplication and
        including null expectations from random sampling of UMIs from the
UMIs observed across all positions.
'''
import sys
import collections
import re
import os
# required to make iteritems python2 and python3 compatible
from builtins import dict
import pysam
import pandas as pd
import numpy as np
import umi_tools.Utilities as U
import umi_tools.network as network
import umi_tools.umi_methods as umi_methods
# add the generic docstring text
__doc__ = __doc__ + U.GENERIC_DOCSTRING_GDC
__doc__ = __doc__ + U.GROUP_DEDUP_GENERIC_OPTIONS
def detect_bam_features(bamfile, n_entries=1000):
''' read the first n entries in the bam file and identify the tags
available detecting multimapping '''
inbam = pysam.Samfile(bamfile)
inbam = inbam.fetch(until_eof=True)
tags = ["NH", "X0", "XT"]
available_tags = {x: 1 for x in tags}
for n, read in enumerate(inbam):
if n > n_entries:
break
if read.is_unmapped:
continue
else:
for tag in tags:
if not read.has_tag(tag):
available_tags[tag] = 0
return available_tags
def aggregateStatsDF(stats_df):
''' return a dataframe with aggregated counts per UMI'''
grouped = stats_df.groupby("UMI")
agg_dict = {'counts': [np.median, len, np.sum]}
agg_df = grouped.agg(agg_dict)
agg_df.columns = ['median_counts', 'times_observed', 'total_counts']
return agg_df
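# Hedged example (illustration only, not part of umi_tools): aggregateStatsDF collapses
# the per-position stats table to one row per UMI. The toy frame below mimics the
# stats_df layout built in main(); the UMI strings and counts are made up.
def _demo_aggregate_stats():
    toy = pd.DataFrame({"UMI": ["AAT", "AAT", "CGA"], "counts": [3, 5, 2]})
    # -> columns median_counts / times_observed / total_counts, indexed by UMI
    return aggregateStatsDF(toy)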
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = U.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
group = U.OptionGroup(parser, "dedup-specific options")
group.add_option("--output-stats", dest="stats", type="string",
default=False,
help="Specify location to output stats")
parser.add_option_group(group)
# add common options (-h/--help, ...) and parse command line
(options, args) = U.Start(parser, argv=argv)
U.validateSamOptions(options)
if options.random_seed:
np.random.seed(options.random_seed)
if options.stdin != sys.stdin:
in_name = options.stdin.name
options.stdin.close()
else:
raise ValueError("Input on standard in not currently supported")
if options.stdout != sys.stdout:
if options.no_sort_output:
out_name = options.stdout.name
else:
out_name = U.getTempFilename()
sorted_out_name = options.stdout.name
options.stdout.close()
else:
if options.no_sort_output:
out_name = "-"
else:
out_name = U.getTempFilename()
sorted_out_name = "-"
if not options.no_sort_output: # need to determine the output format for sort
if options.out_sam:
sort_format = "sam"
else:
sort_format = "bam"
if options.in_sam:
in_mode = "r"
else:
in_mode = "rb"
if options.out_sam:
out_mode = "wh"
else:
out_mode = "wb"
if options.stats and options.ignore_umi:
raise ValueError("'--output-stats' and '--ignore-umi' options"
" cannot be used together")
infile = pysam.Samfile(in_name, in_mode)
outfile = pysam.Samfile(out_name, out_mode, template=infile)
if options.paired:
outfile = umi_methods.TwoPassPairWriter(infile, outfile)
nInput, nOutput, input_reads, output_reads = 0, 0, 0, 0
if options.detection_method:
bam_features = detect_bam_features(infile.filename)
if not bam_features[options.detection_method]:
if sum(bam_features.values()) == 0:
raise ValueError(
"There are no bam tags available to detect multimapping. "
"Do not set --multimapping-detection-method")
else:
raise ValueError(
"The chosen method of detection for multimapping (%s) "
"will not work with this bam. Multimapping can be detected"
" for this bam using any of the following: %s" % (
options.detection_method, ",".join(
[x for x in bam_features if bam_features[x]])))
gene_tag = options.gene_tag
metacontig2contig = None
if options.chrom:
inreads = infile.fetch(reference=options.chrom)
else:
if options.per_contig and options.gene_transcript_map:
metacontig2contig = umi_methods.getMetaContig2contig(
infile, options.gene_transcript_map)
metatag = "MC"
inreads = umi_methods.metafetcher(infile, metacontig2contig, metatag)
gene_tag = metatag
else:
inreads = infile.fetch()
# set up ReadCluster functor with methods specific to
# specified options.method
processor = network.ReadDeduplicator(options.method)
bundle_iterator = umi_methods.get_bundles(
options,
metacontig_contig=metacontig2contig)
if options.stats:
# set up arrays to hold stats data
stats_pre_df_dict = {"UMI": [], "counts": []}
stats_post_df_dict = {"UMI": [], "counts": []}
pre_cluster_stats = []
post_cluster_stats = []
pre_cluster_stats_null = []
post_cluster_stats_null = []
topology_counts = collections.Counter()
node_counts = collections.Counter()
read_gn = umi_methods.random_read_generator(
infile.filename, chrom=options.chrom,
barcode_getter=bundle_iterator.barcode_getter)
for bundle, key, status in bundle_iterator(inreads):
nInput += sum([bundle[umi]["count"] for umi in bundle])
while nOutput >= output_reads + 10000:
output_reads += 10000
U.info("Written out %i reads" % output_reads)
while nInput >= input_reads + 1000000:
input_reads += 1000000
U.info("Parsed %i input reads" % input_reads)
if options.stats:
# generate pre-dudep stats
average_distance = umi_methods.get_average_umi_distance(bundle.keys())
pre_cluster_stats.append(average_distance)
cluster_size = len(bundle)
random_umis = read_gn.getUmis(cluster_size)
average_distance_null = umi_methods.get_average_umi_distance(random_umis)
pre_cluster_stats_null.append(average_distance_null)
if options.ignore_umi:
for umi in bundle:
nOutput += 1
outfile.write(bundle[umi]["read"])
else:
# dedup using umis and write out deduped bam
reads, umis, umi_counts = processor(
bundle=bundle,
threshold=options.threshold)
for read in reads:
outfile.write(read)
nOutput += 1
if options.stats:
# collect pre-dudupe stats
stats_pre_df_dict['UMI'].extend(bundle)
stats_pre_df_dict['counts'].extend(
[bundle[UMI]['count'] for UMI in bundle])
# collect post-dudupe stats
post_cluster_umis = [bundle_iterator.barcode_getter(x)[0] for x in reads]
stats_post_df_dict['UMI'].extend(umis)
stats_post_df_dict['counts'].extend(umi_counts)
average_distance = umi_methods.get_average_umi_distance(post_cluster_umis)
post_cluster_stats.append(average_distance)
cluster_size = len(post_cluster_umis)
random_umis = read_gn.getUmis(cluster_size)
average_distance_null = umi_methods.get_average_umi_distance(random_umis)
post_cluster_stats_null.append(average_distance_null)
outfile.close()
if not options.no_sort_output:
# sort the output
pysam.sort("-o", sorted_out_name, "-O", sort_format, out_name)
os.unlink(out_name) # delete the tempfile
if options.stats:
# generate the stats dataframe
stats_pre_df = pd.DataFrame(stats_pre_df_dict)
        stats_post_df = pd.DataFrame(stats_post_df_dict)
import os
import pandas as pd
import math
import datetime
from tqdm import tqdm
from pathlib import Path
from seg import seglosses
from seg.config import config
from seg.data import DataLoader
from seg.architect.Unet import unet
from seg.utils import time_to_timestr
import tensorflow as tf
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard, LearningRateScheduler
from tensorflow.keras.optimizers.schedules import InverseTimeDecay
from tensorboard.plugins.hparams import api as hp
tf.get_logger().setLevel("INFO")
HP_FREEZE_AT = hp.HParam("freeze_at", hp.Discrete([16]))
HP_DROPOUT = hp.HParam("dropout", hp.RealInterval(0.1, 0.2))
HP_OPTIMIZER = hp.HParam("optimizer", hp.Discrete(["sgd", "adam"]))
HP_LOSS = hp.HParam("loss", hp.Discrete(["focal"]))
HP_GAMMA = hp.HParam("focal_gamma", hp.Discrete([0.5, 1., 2., 5.]))
HPARAMS = [
HP_FREEZE_AT,
HP_DROPOUT,
HP_OPTIMIZER,
HP_LOSS,
HP_GAMMA,
]
METRICS = [
hp.Metric("epoch_jaccard_index",
group="validation",
display_name="Jaccard Index (val.)"),
hp.Metric("epoch_dice_coeff",
group="validation",
display_name="Dice Coeff (val.)")
]
def lr_step_decay(epoch, lr):
"""
Step decay lr: learning_rate = initial_lr * drop_rate^floor(epoch / epochs_drop)
"""
initial_learning_rate = config["learning_rate"]
drop_rate = 0.5
epochs_drop = 10.0
return initial_learning_rate * math.pow(drop_rate, math.floor(epoch/epochs_drop))
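# Hedged illustration (not called by the training loop): with the defaults above the
# schedule halves the base rate every 10 epochs, so epochs 0, 10 and 25 give 1.0x,
# 0.5x and 0.25x of config["learning_rate"]. The epoch choices are arbitrary.
def _demo_lr_step_decay(epochs=(0, 10, 25)):
    # the lr argument is unused by lr_step_decay, so None is fine here
    return [lr_step_decay(e, None) for e in epochs]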
def train(run_dir, hparams, train_gen, valid_gen):
# define model
model = unet(dropout_rate=hparams[HP_DROPOUT],
freeze=config["freeze"],
freeze_at=hparams[HP_FREEZE_AT])
print("Model: ", model._name)
# optim
optimizers = {
"sgd": SGD(learning_rate=config["learning_rate"], momentum=config["momentum"], nesterov=True),
"adam": Adam(learning_rate=config["learning_rate"], amsgrad=True),
"rmsprop": RMSprop(learning_rate=config["learning_rate"], momentum=config["momentum"])
}
optimizer = optimizers[hparams[HP_OPTIMIZER]]
print("Optimizer: ", optimizer._name)
# loss
losses = {
"jaccard": seglosses.jaccard_loss,
"dice": seglosses.dice_loss,
"bce": seglosses.bce_loss,
"bce_dice": seglosses.bce_dice_loss,
"focal": seglosses.focal_loss(gamma=hparams[HP_GAMMA])
}
loss = losses[hparams[HP_LOSS]]
model.compile(optimizer=optimizer,
loss=[loss],
metrics=[seglosses.jaccard_index, seglosses.dice_coeff, seglosses.bce_loss])
# callbacks
lr_schedule = LearningRateScheduler(lr_step_decay,
verbose=1)
anne = ReduceLROnPlateau(monitor="loss",
factor=0.2,
patience=15,
verbose=1,
min_lr=1e-7)
early = EarlyStopping(monitor="val_loss",
patience=50,
verbose=1)
tensorboard_callback = TensorBoard(log_dir=run_dir,
write_images=True)
hparams_callback = hp.KerasCallback(run_dir, hparams)
file_path = "../models/%s/%s/%s_%s_ep{epoch:02d}_bsize%d_insize%s.hdf5" % (
run_dir.split("/")[-2],
run_dir.split("/")[-1],
model._name,
optimizer._name,
config["batch_size"],
config["image_size"]
)
Path("../models/{}/{}".format(run_dir.split("/")[-2],
run_dir.split("/")[-1])).mkdir(parents=True, exist_ok=True)
checkpoint = ModelCheckpoint(file_path, verbose=1, save_best_only=True)
callbacks_list = [
lr_schedule,
early,
anne,
checkpoint,
tensorboard_callback,
hparams_callback
]
print("="*100)
print("TRAINING ...\n")
history = model.fit(train_gen,
batch_size=config["batch_size"],
epochs=config["epochs"],
callbacks=callbacks_list,
validation_data=valid_gen,
workers=8,
use_multiprocessing=True)
    his = pd.DataFrame(history.history)
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
        :description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - not applied)
        :param num_apps: number of applications
        :param app_interval: number of days between applications
:NOTE in TED model there are two application scenarios per simulation (one for a min/max exposure scenario)
(this is why the parameters are passed in)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
        result = pd.Series([[]], dtype='bool')
#!/usr/bin/env python3
import os
import subprocess
import tempfile
from pathlib import Path
import pandas as pd
path_to_file = os.path.realpath(__file__)
repo_root = Path(path_to_file).parent.parent
INPUT_ZIP = repo_root / "downloaded-data" / "fine-grained-refactorings.zip"
SAVE_TO = repo_root / "data" / "fine-grained-refactorings.csv"
with tempfile.TemporaryDirectory() as dir:
subprocess.run(["unzip", INPUT_ZIP, "-d", dir])
dfs = [
pd.read_csv(file)
for file in (Path(dir) / "manualy_labeled_commits (goldset)").iterdir()
]
    concat_dfs = pd.concat(dfs, axis=0, ignore_index=True)
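    # Writing the combined frame to SAVE_TO is an assumed final step (illustration only;
    # the source stops at the concat above).
    concat_dfs.to_csv(SAVE_TO, index=False)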
from src.sampling import outputs_given_z_c_y
from scipy.spatial import distance
import pandas as pd
import torch
import numpy as np
from torch import nn
def outputs_counterfact_given_y_c(model, y_c_list, center_z=False):
'''
Samples from a list of target and condition given a model
Parameters:
Model: VariationalModel
A fitted VariationalModel
y_c_list: list of tensor
A list of tuple of target and condition tensors
center_z: bool
Whether to sample from the origin of the latent space
'''
n_sample = y_c_list[0][0].shape[0]
if center_z:
z = torch.zeros(n_sample, model.latent_shape)
else:
z = model.latent_distribution.sample((n_sample, ))
counterfactuals = [outputs_given_z_c_y(model, z, c, y) for y, c in y_c_list]
return counterfactuals
def P_X_cat_counterfact_given_y_c(model, y_c_list, center_z=False):
'''
Samples counterfactuals from a list of target and condition given a model.
Parameters:
Model: VariationalModel
A fitted VariationalModel
y_c_list: list of tensor
A list of tuple of target and condition tensors
center_z: bool
Whether to sample from the origin of the latent space
'''
counterfact_outputs = outputs_counterfact_given_y_c(model, y_c_list, center_z)
counterfact_probas = torch.cat([nn.Softmax(dim=1)(el['X_logits']) for el in counterfact_outputs]).reshape(len(y_c_list), -1, model.X_categorical_n_classes, model.X_categorical_shape).permute(0, 1, 3, 2)
return counterfact_probas
def compute_categorical_average_effect(probas):
'''
To be improved (for loop + nan problem + allow for choosing the similarity/distance)
Pb with jensenshannon distance, some probas that are really close return nan instead of zeros.
To allow for computation on other kinds of distance for continuous effects
'''
if isinstance(probas, torch.Tensor):
probas = probas.detach().numpy()
ae = {}
n_classes = probas.shape[0]
for i in range(n_classes):
for j in range(i + 1, n_classes):
jsd = distance.jensenshannon(probas[i].T, probas[j].T, base=3)
ae[f'ae {i}-{j}'] = np.nanmean(jsd, axis=1) # gets nan instead of 0 for proba that are really close
    return pd.DataFrame(ae)
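# Hedged toy example (illustration only): compute_categorical_average_effect expects
# probabilities shaped (n_counterfactual_classes, n_samples, n_variables, n_categories)
# and returns one Jensen-Shannon based 'ae i-j' column per pair of classes. The numbers
# below are made up.
def _demo_average_effect():
    probas = np.array([
        [[[0.9, 0.1], [0.2, 0.8]]],  # class 0: one sample, two variables, two categories
        [[[0.1, 0.9], [0.2, 0.8]]],  # class 1
    ])
    # -> DataFrame with a single 'ae 0-1' column, one row per variable
    return compute_categorical_average_effect(probas)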
import numpy as np
import pandas as pd
from math import ceil
from itertools import combinations, product
from collections import Counter
from scipy.stats import chi2_contingency, pointbiserialr
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
sns.set_style("darkgrid")
class Vizard:
"""Vizard object holds the `DataFrame` along with its configurations including the
`PROBLEM_TYPE`, `DEPENDENT_VARIABLE`, `CATEGORICAL_INDEPENDENT_VARIABLES`,
`CONTINUOUS_INDEPENDENT_VARIABLES`, and `TEXT_VARIABLES`"""
def __init__(self, data, config=None):
self._data = data
self._config = config
@property
def data(self):
return self._data
@property
def config(self):
return self._config
@data.setter
def data(self, data):
self._data = data
@config.setter
def config(self, config):
self._config = config
def categorical(self, x):
fig = plt.figure(figsize=(10, 4))
fig.suptitle(f"{x} Distribution", fontsize=24)
ax = self.data[x].value_counts().plot.bar()
for p in ax.patches:
ax.annotate(
str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005)
)
return fig
def continuous(self, x):
fig = plt.figure(figsize=(20, 6))
fig.suptitle(f"{x} Distribution", fontsize=24)
sns.histplot(self.data[x], kde=False)
return fig
def timeseries(self, x):
fig, ax = plt.subplots(figsize=(20, 6))
self.data[x].plot(ax=ax)
return fig
def categorical_vs_continuous(self, x, y, return_fig=True):
fig = plt.figure(figsize=(20, 8))
ax = [
plt.subplot2grid((2, 3), (0, 0), colspan=1),
plt.subplot2grid((2, 3), (0, 1), colspan=2),
plt.subplot2grid((2, 3), (1, 0), colspan=3),
]
fig.suptitle(x, fontsize=18)
self.data[x].value_counts().plot.pie(ax=ax[0], autopct="%1.1f%%")
ax[0].legend()
sns.histplot(data=self.data, x=y, hue=x, kde=False, ax=ax[1])
sns.boxplot(y=y, x=x, data=self.data, ax=ax[2])
return fig if return_fig else plt.show() if fig is not None else None
def categorical_vs_categorical(self, x, y, return_fig=True):
fig, ax = plt.subplots(1, 2, figsize=(20, 6))
fig.suptitle(x, fontsize=18)
self.data[x].value_counts().plot.pie(ax=ax[0], autopct="%1.1f%%")
ax[0].legend()
sns.heatmap(
pd.crosstab(index=self.data[y], columns=self.data[x]),
ax=ax[1],
cmap="Blues",
annot=True,
square=True,
fmt="d",
)
return fig if return_fig else plt.show() if fig is not None else None
def continuous_vs_continuous(self, x, y, return_fig=True):
fig, ax = plt.subplots(1, 2, figsize=(20, 6))
fig.suptitle(x, fontsize=18)
sns.histplot(self.data[x], ax=ax[0], kde=False)
sns.scatterplot(x=x, y=y, data=self.data, ax=ax[1])
return fig if return_fig else plt.show() if fig is not None else None
def continuous_vs_categorical(self, x, y, return_fig=True):
fig = plt.figure(figsize=(20, 12))
ax = [
plt.subplot2grid((3, 3), (0, 0), colspan=3),
plt.subplot2grid((3, 3), (1, 0), colspan=3),
plt.subplot2grid((3, 3), (2, 0)),
plt.subplot2grid((3, 3), (2, 1), colspan=2),
]
fig.suptitle(x, fontsize=18)
sns.histplot(self.data[x], kde=False, ax=ax[0])
sns.histplot(x=x, hue=y, data=self.data, kde=False, ax=ax[1])
sns.boxplot(x=self.data[x], ax=ax[2])
sns.boxplot(x=y, y=x, data=self.data, ax=ax[3])
return fig if return_fig else plt.show() if fig is not None else None
def check_missing(self, return_fig=True):
"""Plot a heatmap to visualize missing values in the DataFrame"""
fig, ax = plt.subplots(figsize=(20, 6))
fig.suptitle("Missing Values", fontsize=24)
sns.heatmap(self.data.isnull(), cbar=False, yticklabels=False)
return fig if return_fig else plt.show() if fig is not None else None
def count_missing(self, return_fig=True):
"""Plot to visualize the count of missing values in each column in the DataFrame"""
fig = plt.figure(figsize=(20, 4))
fig.suptitle("Count of Missing Values", fontsize=24)
ax = self.data.isna().sum().plot.bar()
for p in ax.patches:
ax.annotate(
str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005)
)
return fig if return_fig else plt.show() if fig is not None else None
def count_missing_by_group(self, group_by=None, return_fig=True):
"""Plot to visualize the count of missing values in each column in the DataFrame,
grouped by a categorical variable
:param group_by: a column in the DataFrame"""
fig, ax = plt.subplots(figsize=(20, 4))
fig.suptitle(f"Count of Missing Values Grouped By {group_by}", fontsize=24)
        self.data.drop(group_by, axis=1).isna().groupby(
self.data[group_by], sort=False
).sum().T.plot.bar(ax=ax)
for p in ax.patches:
ax.annotate(
str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005)
)
return fig if return_fig else plt.show() if fig is not None else None
def count_unique(self, return_fig=True):
"""Plot to visualize the count of unique values in each column in the DataFrame"""
fig = plt.figure(figsize=(20, 4))
fig.suptitle("Count of Unique Values", fontsize=24)
ax = self.data.nunique().plot.bar()
for p in ax.patches:
ax.annotate(
str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005)
)
return fig if return_fig else plt.show() if fig is not None else None
def count_unique_by_group(self, group_by=None, return_fig=True):
"""Plot to visualize the count of unique values in each column in the DataFrame,
grouped by a categorical variable
:param group_by: a column in the DataFrame"""
fig, ax = plt.subplots(figsize=(20, 4))
fig.suptitle(f"Count of Unique Values Grouped By {group_by}", fontsize=24)
self.data.groupby(group_by).nunique().T.plot.bar(ax=ax)
for p in ax.patches:
ax.annotate(
str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005)
)
return fig if return_fig else plt.show() if fig is not None else None
def dependent_variable(self, return_fig=True):
"""Based on the type of problem, plot a univariate visualization of target column"""
if self.config.PROBLEM_TYPE == "regression":
fig = self.continuous(self.config.DEPENDENT_VARIABLE)
elif self.config.PROBLEM_TYPE == "classification":
fig = self.categorical(self.config.DEPENDENT_VARIABLE)
elif self.config.PROBLEM_TYPE == "time_series":
fig = self.timeseries(self.config.DEPENDENT_VARIABLE)
else:
print("Invalid Problem Type")
fig = None
return fig if return_fig else plt.show() if fig is not None else None
def pairwise_scatter(self, return_fig=True):
"""Plot pairwise scatter plots to visualize the continuous variables in the dataset"""
cols = self.config.CONTINUOUS_INDEPENDENT_VARIABLES[:]
if self.config.PROBLEM_TYPE == "regression":
cols.append(self.config.DEPENDENT_VARIABLE)
comb = list(combinations(cols, 2))
n_plots = len(comb)
n_rows = ceil(n_plots / 2)
fig = plt.figure(figsize=(20, 6 * n_rows))
for i, (x, y) in enumerate(comb):
ax = fig.add_subplot(n_rows, 2, i + 1)
sns.scatterplot(data=self.data, x=x, y=y, ax=ax)
ax.set_title(f"{y} vs {x}", fontsize=18)
return fig if return_fig else plt.show() if fig is not None else None
def pairwise_violin(self, return_fig=True):
"""Plot pairwise violin plots to visualize the continuous variables grouped by the
categorical variables in the dataset"""
cat_cols = self.config.CATEGORICAL_INDEPENDENT_VARIABLES[:]
if self.config.PROBLEM_TYPE == "classification":
cat_cols.append(self.config.DEPENDENT_VARIABLE)
comb = list(product(self.config.CONTINUOUS_INDEPENDENT_VARIABLES, cat_cols))
n_plots = len(comb)
n_rows = ceil(n_plots / 2)
fig = plt.figure(figsize=(20, 6 * n_rows))
for i, (x, y) in enumerate(comb):
ax = fig.add_subplot(n_rows, 2, i + 1)
sns.violinplot(data=self.data, x=y, y=x)
ax.set_title(f"{y} vs {x}", fontsize=18)
return fig if return_fig else plt.show() if fig is not None else None
def pairwise_crosstabs(self, return_fig=True):
"""Plot pairwise crosstabs plots to visualize the categorical variables in the dataset"""
cols = self.config.CATEGORICAL_INDEPENDENT_VARIABLES[:]
if self.config.PROBLEM_TYPE == "classification":
cols.append(self.config.DEPENDENT_VARIABLE)
comb = list(combinations(cols, 2))
n_plots = len(comb)
n_rows = ceil(n_plots / 2)
fig = plt.figure(figsize=(20, 6 * n_rows))
for i, (x, y) in enumerate(comb):
ax = fig.add_subplot(n_rows, 2, i + 1)
sns.heatmap(
pd.crosstab(index=self.data[y], columns=self.data[x]),
ax=ax,
cmap="Blues",
annot=True,
square=True,
fmt="d",
)
ax.set_title(f"{y} vs {x}", fontsize=18)
return fig if return_fig else plt.show() if fig is not None else None
def pair_plot(self, return_fig=True):
"""Plot a pairplot to vizualize the complete DataFrame"""
if self.config.PROBLEM_TYPE == "regression":
g = sns.pairplot(
data=self.data[
self.config.CONTINUOUS_INDEPENDENT_VARIABLES
+ [self.config.DEPENDENT_VARIABLE]
]
)
g.fig.suptitle("Pairplot", fontsize=24, y=1.08)
return g if return_fig else plt.show()
elif self.config.PROBLEM_TYPE == "classification":
g = sns.pairplot(
data=self.data[
self.config.CONTINUOUS_INDEPENDENT_VARIABLES
+ [self.config.DEPENDENT_VARIABLE]
],
hue=self.config.DEPENDENT_VARIABLE,
)
g.fig.suptitle("Pairplot", fontsize=24, y=1.08)
return g if return_fig else plt.show()
elif self.config.PROBLEM_TYPE == "unsupervised":
g = sns.pairplot(
data=self.data[
self.config.CONTINUOUS_INDEPENDENT_VARIABLES
]
)
g.fig.suptitle("Pairplot", fontsize=24, y=1.08)
return g if return_fig else plt.show()
else:
pass
def categorical_variables(self):
"""Create bivariate visualizations for the categorical variables with the target variable or
or univariate visualizations in case of unservised problems
"""
if self.config.PROBLEM_TYPE == "regression":
for col in self.config.CATEGORICAL_INDEPENDENT_VARIABLES:
self.categorical_vs_continuous(col, self.config.DEPENDENT_VARIABLE)
elif self.config.PROBLEM_TYPE == "classification":
for col in self.config.CATEGORICAL_INDEPENDENT_VARIABLES:
self.categorical_vs_categorical(col, self.config.DEPENDENT_VARIABLE)
elif self.config.PROBLEM_TYPE == 'unsupervised':
for col in self.config.CATEGORICAL_INDEPENDENT_VARIABLES:
self.categorical(col)
else:
pass
def continuous_variables(self):
"""Create bivariate visualizations for the continuous variables with the target variable
or univariate visualizations in case of unservised problems
"""
if self.config.PROBLEM_TYPE == "regression":
for col in self.config.CONTINUOUS_INDEPENDENT_VARIABLES:
self.continuous_vs_continuous(col, self.config.DEPENDENT_VARIABLE)
elif self.config.PROBLEM_TYPE == "classification":
for col in self.config.CONTINUOUS_INDEPENDENT_VARIABLES:
self.continuous_vs_categorical(col, self.config.DEPENDENT_VARIABLE)
elif self.config.PROBLEM_TYPE == 'unsupervised':
for col in self.config.CONTINUOUS_INDEPENDENT_VARIABLES:
self.continuous(col)
else:
pass
def corr_plot(self, figsize=(10, 10), method="pearson", return_fig=True):
"""Plot a heatmap to vizualize the correlation between the various continuous columns in the DataFrame
:param method: the method of correlation {'pearson', 'kendall', 'spearman'}
"""
fig, ax = plt.subplots(figsize=figsize)
fig.suptitle("Correlation Plot", fontsize=24)
corr = self.data.corr(method=method)
        mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
mask[np.diag_indices_from(mask)] = True
annot = True if len(corr) <= 10 else False
sns.heatmap(
corr, mask=mask, cmap="RdBu", ax=ax, annot=annot, square=True, center=0
)
return fig if return_fig else plt.show() if fig is not None else None
def point_biserial_plot(self, figsize=(10, 10), return_fig=True):
"""Plot a heatmap to visualize point biserial correaltion between continuous and categorical variables"""
num_cols = self.config.CONTINUOUS_INDEPENDENT_VARIABLES[:]
if self.config.PROBLEM_TYPE == "regression":
num_cols.append(self.config.DEPENDENT_VARIABLE)
cat_cols = self.config.CATEGORICAL_INDEPENDENT_VARIABLES[:]
if self.config.PROBLEM_TYPE == "classification":
cat_cols.append(self.config.DEPENDENT_VARIABLE)
fig, ax = plt.subplots(figsize=figsize)
fig.suptitle("Point Biserial Plot", fontsize=24)
pb_table = np.zeros((len(cat_cols), len(num_cols)))
for i in range(len(cat_cols)):
for j in range(len(num_cols)):
df_ = self.data.dropna(subset=[cat_cols[i], num_cols[j]])
pb_table[i][j] = pointbiserialr(
LabelEncoder().fit_transform(df_[cat_cols[i]]), df_[num_cols[j]]
)[0]
annot = True if max(pb_table.shape) <= 10 else False
        pb_table = pd.DataFrame(pb_table, columns=num_cols, index=cat_cols)
"""Remotely control your Binance account via their API : https://binance-docs.github.io/apidocs/spot/en"""
import re
import json
import hmac
import hashlib
import time
import requests
import base64
import sys
import math
import pandas as pd
import numpy as np
from numpy import floor
from datetime import datetime, timedelta
from requests.auth import AuthBase
from requests import Request, Session
from models.helper.LogHelper import Logger
from urllib.parse import urlencode
DEFAULT_MAKER_FEE_RATE = 0.0015 # added 0.0005 to allow for price movements
DEFAULT_TAKER_FEE_RATE = 0.0015 # added 0.0005 to allow for price movements
DEFAULT_TRADE_FEE_RATE = 0.0015 # added 0.0005 to allow for price movements
DEFAULT_GRANULARITY = "1h"
SUPPORTED_GRANULARITY = ["1m", "5m", "15m", "1h", "6h", "1d"]
MULTIPLIER_EQUIVALENTS = [1, 5, 15, 60, 360, 1440]
FREQUENCY_EQUIVALENTS = ["T", "5T", "15T", "H", "6H", "D"]
DEFAULT_MARKET = "BTCGBP"
class AuthAPIBase:
def _isMarketValid(self, market: str) -> bool:
p = re.compile(r"^[A-Z0-9]{5,12}$")
if p.match(market):
return True
return False
def convert_time(self, epoch: int = 0):
if math.isnan(epoch) is False:
epoch_str = str(epoch)[0:10]
return datetime.fromtimestamp(int(epoch_str))
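# Hedged illustration (not part of the client): the base-class helpers above validate a
# market symbol and convert a millisecond epoch. The symbol and timestamp are examples.
def _demo_base_helpers():
    base = AuthAPIBase()
    ok = base._isMarketValid("BTCGBP")      # True: 6 upper-case characters
    dt = base.convert_time(1609459200000)   # local datetime for epoch 1609459200
    return ok, dt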
class AuthAPI(AuthAPIBase):
def __init__(
self,
api_key: str = "",
api_secret: str = "",
api_url: str = "https://api.binance.com",
order_history: list = [],
recv_window: int = 5000,
) -> None:
"""Binance API object model
Parameters
----------
api_key : str
Your Binance account portfolio API key
api_secret : str
Your Binance account portfolio API secret
api_url
Binance API URL
"""
# options
self.debug = False
self.die_on_api_error = False
valid_urls = [
"https://api.binance.com",
"https://api.binance.us",
"https://testnet.binance.vision",
]
# validate Binance API
if api_url not in valid_urls:
raise ValueError("Binance API URL is invalid")
# validates the api key is syntactically correct
        p = re.compile(r"^[A-Za-z0-9]{64}$")
if not p.match(api_key):
self.handle_init_error("Binance API key is invalid")
# validates the api secret is syntactically correct
        p = re.compile(r"^[A-Za-z0-9]{64}$")
if not p.match(api_secret):
self.handle_init_error("Binance API secret is invalid")
self._api_key = api_key
self._api_secret = api_secret
self._api_url = api_url
# order history
self.order_history = order_history
# api recvWindow
self.recv_window = recv_window
def handle_init_error(self, err: str) -> None:
if self.debug:
raise TypeError(err)
else:
raise SystemExit(err)
def _dispatch_request(self, method: str):
session = Session()
session.headers.update(
{
"Content-Type": "application/json; charset=utf-8",
"X-MBX-APIKEY": self._api_key,
}
)
return {
"GET": session.get,
"DELETE": session.delete,
"PUT": session.put,
"POST": session.post,
}.get(method, "GET")
def createHash(self, uri: str = ""):
return hmac.new(
self._api_secret.encode("utf-8"), uri.encode("utf-8"), hashlib.sha256
).hexdigest()
def getTimestamp(self):
return int(time.time() * 1000)
def getAccounts(self) -> pd.DataFrame:
"""Retrieves your list of accounts"""
# GET /api/v3/account
try:
resp = self.authAPI(
"GET", "/api/v3/account", {"recvWindow": self.recv_window}
)
# unexpected data, then return
if len(resp) == 0 or "balances" not in resp:
                return pd.DataFrame()
import math
from typing import cast
import pandas as pd
import pytest
from ete3 import Tree, ClusterTree
from genomics_data_index.api.query.GenomicsDataIndex import GenomicsDataIndex
from genomics_data_index.api.query.SamplesQuery import SamplesQuery
from genomics_data_index.api.query.impl.DataFrameSamplesQuery import DataFrameSamplesQuery
from genomics_data_index.api.query.impl.ExperimentalTreeSamplesQuery import ExperimentalTreeSamplesQuery
from genomics_data_index.api.query.impl.MutationTreeSamplesQuery import MutationTreeSamplesQuery
from genomics_data_index.api.query.impl.TreeSamplesQuery import TreeSamplesQuery
from genomics_data_index.configuration.connector.DataIndexConnection import DataIndexConnection
from genomics_data_index.storage.SampleSet import SampleSet
from genomics_data_index.storage.io.mutation.NucleotideSampleDataPackage import NucleotideSampleDataPackage
from genomics_data_index.storage.model.QueryFeatureHGVS import QueryFeatureHGVS
from genomics_data_index.storage.model.QueryFeatureMLST import QueryFeatureMLST
from genomics_data_index.storage.model.QueryFeatureMutationSPDI import QueryFeatureMutationSPDI
from genomics_data_index.storage.model.db import Sample
from genomics_data_index.test.integration import snippy_all_dataframes, data_dir, snpeff_tree_file
# wrapper methods to simplify writing tests
def query(connection: DataIndexConnection, **kwargs) -> SamplesQuery:
return GenomicsDataIndex(connection=connection).samples_query(**kwargs)
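# The tests below build a SamplesQuery over the full universe via
# query(loaded_database_connection) and then chain filters such as .isin(), .isa() and
# .hasa() on the returned object.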
def test_initialized_query_default(loaded_database_connection: DataIndexConnection):
initial_query = query(loaded_database_connection)
assert len(initial_query) == 9
assert len(initial_query.universe_set) == 9
assert len(initial_query.sample_set) == 9
def test_initialized_query_mutations(loaded_database_connection_with_built_tree: DataIndexConnection):
initial_query = query(loaded_database_connection_with_built_tree)
assert len(initial_query) == 9
assert len(initial_query.sample_set) == 9
assert len(initial_query.universe_set) == 9
initial_query = query(loaded_database_connection_with_built_tree,
universe='mutations', reference_name='genome')
assert len(initial_query) == 3
assert len(initial_query.sample_set) == 3
assert len(initial_query.universe_set) == 3
assert isinstance(initial_query, TreeSamplesQuery)
assert initial_query.tree is not None
def test_empty_universe(loaded_database_connection: DataIndexConnection):
query_result = query(loaded_database_connection).isin('empty')
assert 0 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
query_result = query_result.reset_universe()
assert 0 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 0 == len(query_result.absent_set)
assert 0 == len(query_result.universe_set)
assert '<SamplesQueryIndex[' in str(query_result)
assert 'selected=NA%' in str(query_result)
assert 'unknown=NA%' in str(query_result)
def test_query_isin_samples(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result = query(loaded_database_connection).isin('SampleB')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
def test_query_isin_samples_no_exist(loaded_database_connection: DataIndexConnection):
query_result = query(loaded_database_connection).isin('no_exist')
assert 0 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
def test_query_isin_sample_set_single(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleBSet = SampleSet([sampleB.id])
query_result = query(loaded_database_connection).isin(sampleBSet)
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
def test_query_isin_samples_query_single(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result_B = query(loaded_database_connection).isin('SampleB')
query_result = query(loaded_database_connection).isin(query_result_B)
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
def test_query_isin_samples_query_no_matches(loaded_database_connection: DataIndexConnection):
query_result_empty = query(loaded_database_connection).isin('no_exist')
query_result = query(loaded_database_connection).isin(query_result_empty)
assert 0 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
def test_query_isin_samples_with_unknown(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result = query(loaded_database_connection).hasa('reference:5061:G:A')
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
# case isin with present and unknown
query_result_isin = query_result.isin(['SampleA', 'SampleB'])
assert 1 == len(query_result_isin)
assert {sampleB.id} == set(query_result_isin.sample_set)
assert {sampleA.id} == set(query_result_isin.unknown_set)
assert 9 == len(query_result_isin.universe_set)
# isin with only unknown
query_result_isin = query_result.isin('SampleA')
assert 0 == len(query_result_isin)
assert set() == set(query_result_isin.sample_set)
assert {sampleA.id} == set(query_result_isin.unknown_set)
assert 9 == len(query_result_isin.universe_set)
# isin with only present
query_result_isin = query_result.isin('SampleB')
assert 1 == len(query_result_isin)
assert {sampleB.id} == set(query_result_isin.sample_set)
assert set() == set(query_result_isin.unknown_set)
assert 9 == len(query_result_isin.universe_set)
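# The and_/or_ tests below exercise three-valued (present/unknown/absent) logic:
# under AND, unknown combined with present stays unknown while unknown combined with
# absent becomes absent; under OR, unknown combined with present becomes present while
# unknown combined with absent stays unknown.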
def test_query_and_or(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_resultA = query(loaded_database_connection).isa('SampleA')
assert 1 == len(query_resultA)
assert {sampleA.id} == set(query_resultA.sample_set)
assert 0 == len(query_resultA.unknown_set)
assert 9 == len(query_resultA.universe_set)
query_result_onlyA = query_resultA.reset_universe()
assert 1 == len(query_result_onlyA)
assert {sampleA.id} == set(query_result_onlyA.sample_set)
assert 0 == len(query_result_onlyA.unknown_set)
assert 1 == len(query_result_onlyA.universe_set)
query_resultB = query(loaded_database_connection).isa('SampleB')
assert 1 == len(query_resultB)
assert {sampleB.id} == set(query_resultB.sample_set)
assert 0 == len(query_resultB.unknown_set)
assert 9 == len(query_resultB.universe_set)
query_result_onlyB = query_resultB.reset_universe()
assert 1 == len(query_result_onlyB)
assert {sampleB.id} == set(query_result_onlyB.sample_set)
assert 0 == len(query_result_onlyB.unknown_set)
assert 1 == len(query_result_onlyB.universe_set)
query_resultB_with_unknownA = query(loaded_database_connection).hasa('reference:5061:G:A')
assert 1 == len(query_resultB_with_unknownA)
assert {sampleB.id} == set(query_resultB_with_unknownA.sample_set)
assert 1 == len(query_resultB_with_unknownA.unknown_set)
assert {sampleA.id} == set(query_resultB_with_unknownA.unknown_set)
assert 9 == len(query_resultB_with_unknownA.universe_set)
query_result_onlyB_with_unknownA = query_resultB_with_unknownA.reset_universe(include_unknown=True)
assert 1 == len(query_result_onlyB_with_unknownA)
assert {sampleB.id} == set(query_result_onlyB_with_unknownA.sample_set)
assert 1 == len(query_result_onlyB_with_unknownA.unknown_set)
assert {sampleA.id} == set(query_result_onlyB_with_unknownA.unknown_set)
assert 2 == len(query_result_onlyB_with_unknownA.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_with_unknownA.universe_set)
query_resultC = query(loaded_database_connection).isa('SampleC')
assert 1 == len(query_resultC)
assert {sampleC.id} == set(query_resultC.sample_set)
assert 9 == len(query_resultC.universe_set)
query_result_onlyC = query_resultC.reset_universe()
assert 1 == len(query_result_onlyC)
assert {sampleC.id} == set(query_result_onlyC.sample_set)
assert 1 == len(query_result_onlyC.universe_set)
assert {sampleC.id} == set(query_result_onlyC.universe_set)
query_resultAB = query(loaded_database_connection).isin(['SampleA', 'SampleB'])
assert 2 == len(query_resultAB)
assert {sampleA.id, sampleB.id} == set(query_resultAB.sample_set)
    assert 0 == len(query_resultAB.unknown_set)
assert 9 == len(query_resultAB.universe_set)
# Test AND
query_resultA_and_AB = query_resultA.and_(query_resultAB)
assert 1 == len(query_resultA_and_AB)
assert {sampleA.id} == set(query_resultA_and_AB.sample_set)
assert 0 == len(query_resultA_and_AB.unknown_set)
assert 9 == len(query_resultA_and_AB.universe_set)
query_resultAB_and_A = query_resultAB.and_(query_resultA)
assert 1 == len(query_resultAB_and_A)
assert {sampleA.id} == set(query_resultAB_and_A.sample_set)
assert 0 == len(query_resultAB_and_A.unknown_set)
assert 9 == len(query_resultAB_and_A.universe_set)
query_resultA_and_B = query_resultA.and_(query_resultB)
assert 0 == len(query_resultA_and_B)
assert 0 == len(query_resultA_and_B.unknown_set)
assert 9 == len(query_resultA_and_B.universe_set)
# Test AND with unknowns
query_resultA_and_B_unknown_A = query_resultA.and_(query_resultB_with_unknownA)
assert 0 == len(query_resultA_and_B_unknown_A)
assert 1 == len(query_resultA_and_B_unknown_A.unknown_set)
assert {sampleA.id} == set(query_resultA_and_B_unknown_A.unknown_set)
assert 9 == len(query_resultA_and_B_unknown_A.universe_set)
query_resultAB_and_B_unknown_A = query_resultAB.and_(query_resultB_with_unknownA)
assert 1 == len(query_resultAB_and_B_unknown_A)
assert {sampleB.id} == set(query_resultAB_and_B_unknown_A.sample_set)
assert 1 == len(query_resultAB_and_B_unknown_A.unknown_set)
assert {sampleA.id} == set(query_resultAB_and_B_unknown_A.unknown_set)
assert 9 == len(query_resultAB_and_B_unknown_A.universe_set)
query_resultB_unknown_A_and_AB = query_resultB_with_unknownA.and_(query_resultAB)
assert 1 == len(query_resultB_unknown_A_and_AB)
assert {sampleB.id} == set(query_resultB_unknown_A_and_AB.sample_set)
assert 1 == len(query_resultB_unknown_A_and_AB.unknown_set)
assert {sampleA.id} == set(query_resultB_unknown_A_and_AB.unknown_set)
assert 9 == len(query_resultB_unknown_A_and_AB.universe_set)
query_resultB_and_B_unknown_A = query_resultB.and_(query_resultB_with_unknownA)
assert 1 == len(query_resultB_and_B_unknown_A)
assert {sampleB.id} == set(query_resultB_and_B_unknown_A.sample_set)
assert 0 == len(query_resultB_and_B_unknown_A.unknown_set)
assert 9 == len(query_resultB_and_B_unknown_A.universe_set)
query_resultB_unknown_A_and_B = query_resultB_with_unknownA.and_(query_resultB)
assert 1 == len(query_resultB_unknown_A_and_B)
assert {sampleB.id} == set(query_resultB_unknown_A_and_B.sample_set)
assert 0 == len(query_resultB_unknown_A_and_B.unknown_set)
assert 9 == len(query_resultB_unknown_A_and_B.universe_set)
query_result_onlyB_and_onlyA = query_result_onlyB.and_(query_result_onlyA)
assert 0 == len(query_result_onlyB_and_onlyA)
assert 0 == len(query_result_onlyB_and_onlyA.unknown_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_and_onlyA.absent_set)
assert 2 == len(query_result_onlyB_and_onlyA.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_and_onlyA.universe_set)
query_result_onlyB_unknown_A_and_onlyA = query_result_onlyB_with_unknownA.and_(query_result_onlyA)
assert 0 == len(query_result_onlyB_unknown_A_and_onlyA)
assert 1 == len(query_result_onlyB_unknown_A_and_onlyA.unknown_set)
assert {sampleA.id} == set(query_result_onlyB_unknown_A_and_onlyA.unknown_set)
assert 1 == len(query_result_onlyB_unknown_A_and_onlyA.absent_set)
assert 2 == len(query_result_onlyB_unknown_A_and_onlyA.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_unknown_A_and_onlyA.universe_set)
query_result_onlyB_unknown_A_and_onlyC = query_result_onlyB_with_unknownA.and_(query_result_onlyC)
assert 0 == len(query_result_onlyB_unknown_A_and_onlyC)
assert 0 == len(query_result_onlyB_unknown_A_and_onlyC.unknown_set)
assert 3 == len(query_result_onlyB_unknown_A_and_onlyC.absent_set)
assert 3 == len(query_result_onlyB_unknown_A_and_onlyC.universe_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result_onlyB_unknown_A_and_onlyC.universe_set)
# Test Python & (bitwise and) operator
query_result_onlyB_unknown_A_and_onlyA = query_result_onlyB_with_unknownA & query_result_onlyA
assert 0 == len(query_result_onlyB_unknown_A_and_onlyA)
assert 1 == len(query_result_onlyB_unknown_A_and_onlyA.unknown_set)
assert {sampleA.id} == set(query_result_onlyB_unknown_A_and_onlyA.unknown_set)
assert 1 == len(query_result_onlyB_unknown_A_and_onlyA.absent_set)
assert 2 == len(query_result_onlyB_unknown_A_and_onlyA.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_unknown_A_and_onlyA.universe_set)
query_result_onlyB_unknown_A_and_onlyC = query_result_onlyB_with_unknownA & query_result_onlyC
assert 0 == len(query_result_onlyB_unknown_A_and_onlyC)
assert 0 == len(query_result_onlyB_unknown_A_and_onlyC.unknown_set)
assert 3 == len(query_result_onlyB_unknown_A_and_onlyC.absent_set)
assert 3 == len(query_result_onlyB_unknown_A_and_onlyC.universe_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result_onlyB_unknown_A_and_onlyC.universe_set)
# Test OR
query_resultA_or_AB = query_resultA.or_(query_resultAB)
assert 2 == len(query_resultA_or_AB)
assert {sampleA.id, sampleB.id} == set(query_resultA_or_AB.sample_set)
assert 7 == len(query_resultA_or_AB.absent_set)
assert 0 == len(query_resultA_or_AB.unknown_set)
assert 9 == len(query_resultA_or_AB.universe_set)
query_resultAB_or_A = query_resultAB.or_(query_resultA)
assert 2 == len(query_resultAB_or_A)
assert {sampleA.id, sampleB.id} == set(query_resultAB_or_A.sample_set)
assert 0 == len(query_resultAB_or_A.unknown_set)
assert 7 == len(query_resultAB_or_A.absent_set)
assert 9 == len(query_resultAB_or_A.universe_set)
query_resultA_or_B = query_resultA.or_(query_resultB)
assert 2 == len(query_resultA_or_B)
assert {sampleA.id, sampleB.id} == set(query_resultA_or_B.sample_set)
assert 0 == len(query_resultA_or_B.unknown_set)
assert 7 == len(query_resultA_or_B.absent_set)
assert 9 == len(query_resultA_or_B.universe_set)
query_resultA_or_B_or_C = query_resultA.or_(query_resultB).or_(query_resultC)
assert 3 == len(query_resultA_or_B_or_C)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_resultA_or_B_or_C.sample_set)
assert 0 == len(query_resultA_or_B_or_C.unknown_set)
assert 6 == len(query_resultA_or_B_or_C.absent_set)
assert 9 == len(query_resultA_or_B_or_C.universe_set)
# Test OR with unknowns
query_resultA_or_B_unknown_A = query_resultA.or_(query_resultB_with_unknownA)
assert 2 == len(query_resultA_or_B_unknown_A)
assert {sampleA.id, sampleB.id} == set(query_resultA_or_B_unknown_A.sample_set)
assert 0 == len(query_resultA_or_B_unknown_A.unknown_set)
assert 9 == len(query_resultA_or_B_unknown_A.universe_set)
query_resultAB_or_B_unknown_A = query_resultAB.or_(query_resultB_with_unknownA)
assert 2 == len(query_resultAB_or_B_unknown_A)
assert {sampleA.id, sampleB.id} == set(query_resultAB_or_B_unknown_A.sample_set)
assert 0 == len(query_resultAB_or_B_unknown_A.unknown_set)
assert 9 == len(query_resultAB_or_B_unknown_A.universe_set)
query_resultB_unknown_A_or_AB = query_resultB_with_unknownA.or_(query_resultAB)
assert 2 == len(query_resultB_unknown_A_or_AB)
assert {sampleA.id, sampleB.id} == set(query_resultB_unknown_A_or_AB.sample_set)
assert 0 == len(query_resultB_unknown_A_or_AB.unknown_set)
assert 9 == len(query_resultB_unknown_A_or_AB.universe_set)
query_resultB_or_B_unknown_A = query_resultB.or_(query_resultB_with_unknownA)
assert 1 == len(query_resultB_or_B_unknown_A)
assert {sampleB.id} == set(query_resultB_or_B_unknown_A.sample_set)
assert 1 == len(query_resultB_or_B_unknown_A.unknown_set)
assert {sampleA.id} == set(query_resultB_or_B_unknown_A.unknown_set)
assert 9 == len(query_resultB_or_B_unknown_A.universe_set)
query_resultB_unknown_A_or_B = query_resultB_with_unknownA.or_(query_resultB)
assert 1 == len(query_resultB_unknown_A_or_B)
assert {sampleB.id} == set(query_resultB_unknown_A_or_B.sample_set)
assert 1 == len(query_resultB_unknown_A_or_B.unknown_set)
assert {sampleA.id} == set(query_resultB_unknown_A_or_B.unknown_set)
assert 9 == len(query_resultB_unknown_A_or_B.universe_set)
query_result_onlyB_or_onlyA = query_result_onlyB.or_(query_result_onlyA)
assert 2 == len(query_result_onlyB_or_onlyA)
assert 0 == len(query_result_onlyB_or_onlyA.unknown_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_or_onlyA.sample_set)
assert 2 == len(query_result_onlyB_or_onlyA.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_or_onlyA.universe_set)
query_result_onlyB_unknown_A_or_onlyA = query_result_onlyB_with_unknownA.or_(query_result_onlyA)
assert 2 == len(query_result_onlyB_unknown_A_or_onlyA)
assert 0 == len(query_result_onlyB_unknown_A_or_onlyA.unknown_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_unknown_A_or_onlyA.sample_set)
assert 0 == len(query_result_onlyB_unknown_A_or_onlyA.absent_set)
assert 2 == len(query_result_onlyB_unknown_A_or_onlyA.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_unknown_A_or_onlyA.universe_set)
query_result_onlyB_unknown_A_or_onlyC = query_result_onlyB_with_unknownA.or_(query_result_onlyC)
assert 2 == len(query_result_onlyB_unknown_A_or_onlyC)
assert {sampleB.id, sampleC.id} == set(query_result_onlyB_unknown_A_or_onlyC.sample_set)
assert 1 == len(query_result_onlyB_unknown_A_or_onlyC.unknown_set)
assert {sampleA.id} == set(query_result_onlyB_unknown_A_or_onlyC.unknown_set)
assert 0 == len(query_result_onlyB_unknown_A_or_onlyC.absent_set)
assert 3 == len(query_result_onlyB_unknown_A_or_onlyC.universe_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result_onlyB_unknown_A_or_onlyC.universe_set)
# Test Python | (bitwise or) operator
query_result_onlyB_unknown_A_or_onlyA = query_result_onlyB_with_unknownA | query_result_onlyA
assert 2 == len(query_result_onlyB_unknown_A_or_onlyA)
assert 0 == len(query_result_onlyB_unknown_A_or_onlyA.unknown_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_unknown_A_or_onlyA.sample_set)
assert 0 == len(query_result_onlyB_unknown_A_or_onlyA.absent_set)
assert 2 == len(query_result_onlyB_unknown_A_or_onlyA.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_onlyB_unknown_A_or_onlyA.universe_set)
query_result_onlyB_unknown_A_or_onlyC = query_result_onlyB_with_unknownA | query_result_onlyC
assert 2 == len(query_result_onlyB_unknown_A_or_onlyC)
assert {sampleB.id, sampleC.id} == set(query_result_onlyB_unknown_A_or_onlyC.sample_set)
assert 1 == len(query_result_onlyB_unknown_A_or_onlyC.unknown_set)
assert {sampleA.id} == set(query_result_onlyB_unknown_A_or_onlyC.unknown_set)
assert 0 == len(query_result_onlyB_unknown_A_or_onlyC.absent_set)
assert 3 == len(query_result_onlyB_unknown_A_or_onlyC.universe_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result_onlyB_unknown_A_or_onlyC.universe_set)
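# The two tests below cover universe re-scoping: reset_universe() shrinks the universe
# to the currently selected samples (keeping unknown samples unless
# include_unknown=False), while set_universe() re-scopes a result to an explicitly
# supplied SampleSet.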
def test_query_reset_universe(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
# No unknowns
query_result = query(loaded_database_connection).isin('SampleB')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
query_result = query_result.reset_universe()
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 1 == len(query_result.universe_set)
# With unknowns
query_result = query(loaded_database_connection).hasa('reference:5061:G:A')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
query_result = query_result.reset_universe()
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert 2 == len(query_result.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result.universe_set)
# With unknowns but excluding them
query_result = query(loaded_database_connection).hasa('reference:5061:G:A')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
query_result = query_result.reset_universe(include_unknown=False)
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 1 == len(query_result.universe_set)
assert {sampleB.id} == set(query_result.universe_set)
def test_query_set_universe(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
# No unknowns
query_result = query(loaded_database_connection).isin('SampleB')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
query_result_su = query_result.set_universe(SampleSet(sample_ids=[sampleA.id, sampleB.id]))
assert 1 == len(query_result_su)
assert {sampleB.id} == set(query_result_su.sample_set)
assert 0 == len(query_result_su.unknown_set)
assert 1 == len(query_result_su.absent_set)
assert {sampleA.id} == set(query_result_su.absent_set)
assert 2 == len(query_result_su.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_su.universe_set)
query_result_su = query_result.set_universe(SampleSet(sample_ids=[sampleB.id]))
assert 1 == len(query_result_su)
assert {sampleB.id} == set(query_result_su.sample_set)
assert 0 == len(query_result_su.unknown_set)
assert 0 == len(query_result_su.absent_set)
assert 1 == len(query_result_su.universe_set)
assert {sampleB.id} == set(query_result_su.universe_set)
# With unknowns
query_result = query(loaded_database_connection).hasa('reference:5061:G:A')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert 7 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
query_result_su = query_result.set_universe(SampleSet(sample_ids=[sampleA.id, sampleB.id]))
assert 1 == len(query_result_su)
assert {sampleB.id} == set(query_result_su.sample_set)
assert 1 == len(query_result_su.unknown_set)
assert 0 == len(query_result_su.absent_set)
assert 2 == len(query_result_su.universe_set)
assert {sampleA.id, sampleB.id} == set(query_result_su.universe_set)
query_result_su = query_result.set_universe(SampleSet(sample_ids=[sampleB.id]))
assert 1 == len(query_result_su)
assert {sampleB.id} == set(query_result_su.sample_set)
assert 0 == len(query_result_su.unknown_set)
assert 0 == len(query_result_su.absent_set)
assert 1 == len(query_result_su.universe_set)
assert {sampleB.id} == set(query_result_su.universe_set)
query_result_su = query_result.set_universe(SampleSet(sample_ids=[sampleA.id, sampleB.id, sampleC.id]))
assert 1 == len(query_result_su)
assert {sampleB.id} == set(query_result_su.sample_set)
assert 1 == len(query_result_su.unknown_set)
assert 1 == len(query_result_su.absent_set)
assert 3 == len(query_result_su.universe_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result_su.universe_set)
def test_query_isin_samples_multiple(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result = query(loaded_database_connection).isin(['SampleA', 'SampleB'])
assert 2 == len(query_result)
assert {sampleA.id, sampleB.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_isin_samples_multiple_samples_query(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result_AB = query(loaded_database_connection).isin(['SampleA', 'SampleB'])
query_result = query(loaded_database_connection).isin(query_result_AB)
assert 2 == len(query_result)
assert {sampleA.id, sampleB.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_isa_sample_name(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result = query(loaded_database_connection).isa('SampleB')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
assert not query_result.has_tree()
# Test case where there is an unknown in the result
query_result = query(loaded_database_connection).hasa('reference:1:1:T').isa('SampleB')
assert 0 == len(query_result)
assert {sampleB.id} == set(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Test case where there is a match and unknown in the result
query_result = query(loaded_database_connection).hasa('reference:5061:G:A').isa('SampleB')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Test isa no exist
query_result = query(loaded_database_connection).isa('no_exist')
assert 0 == len(query_result)
assert 9 == len(query_result.universe_set)
assert 0 == len(query_result.unknown_set)
# Test isa no exist with unknown
query_result = query(loaded_database_connection).hasa('reference:5061:G:A').isa('no_exist')
assert 0 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
def test_query_isa_samples_query(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result_B = query(loaded_database_connection).isa('SampleB')
query_result = query(loaded_database_connection).isa(query_result_B)
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# with unknown in result
query_result = query(loaded_database_connection).hasa('reference:1:1:T').isa(query_result_B)
assert 0 == len(query_result)
assert 1 == len(query_result.unknown_set)
assert {sampleB.id} == set(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# with unknown that gets excluded
query_result = query(loaded_database_connection).hasa('reference:5061:G:A').isa(query_result_B)
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
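# tolist()/toset() return sample names; by default only present samples are included,
# and the include_present, include_unknown and include_absent flags control which of
# the three partitions contribute names.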
def test_tolist_and_toset(loaded_database_connection: DataIndexConnection):
conn = loaded_database_connection
assert {'SampleA', 'SampleB'} == set(query(conn).isin(['SampleA', 'SampleB']).tolist())
assert {'SampleA', 'SampleB'} == query(conn).isin(['SampleA', 'SampleB']).toset()
assert {'SampleB'} == set(query(conn).hasa('reference:5061:G:A').tolist())
assert {'SampleB'} == query(conn).hasa('reference:5061:G:A').toset()
assert {'SampleA', 'SampleB'} == set(query(conn).hasa('reference:5061:G:A').tolist(include_unknown=True))
assert {'SampleA', 'SampleB'} == query(conn).hasa('reference:5061:G:A').toset(include_unknown=True)
assert {'SampleA', 'SampleB', 'SampleC', '2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463'
} == set(query(conn).hasa('reference:5061:G:A').tolist(include_unknown=True, include_absent=True))
assert {'SampleA', 'SampleB', 'SampleC', '2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463'
} == query(conn).hasa('reference:5061:G:A').toset(include_unknown=True, include_absent=True)
assert {'SampleA', 'SampleC', '2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463'
} == set(query(conn).hasa('reference:5061:G:A').tolist(include_present=False,
include_unknown=True, include_absent=True))
assert {'SampleA', 'SampleC', '2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463'
} == query(conn).hasa('reference:5061:G:A').toset(include_present=False,
include_unknown=True, include_absent=True)
assert {'SampleC', '2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463'
} == set(query(conn).hasa('reference:5061:G:A').tolist(include_present=False,
include_unknown=False, include_absent=True))
assert {'SampleC', '2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463'
} == query(conn).hasa('reference:5061:G:A').toset(include_present=False,
include_unknown=False, include_absent=True)
assert {'SampleA'} == set(query(conn).hasa('reference:5061:G:A').tolist(include_present=False,
include_unknown=True, include_absent=False))
assert {'SampleA'} == query(conn).hasa('reference:5061:G:A').toset(include_present=False,
include_unknown=True, include_absent=False)
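# The kmer tests below select samples by k-mer Jaccard distance (k=31): a threshold of
# 1.0 matches SampleA, SampleB and SampleC, 0.5 matches SampleA and SampleC, and 0.49
# matches only SampleA itself.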
def test_query_isin_kmer(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_result = query(loaded_database_connection).isin('SampleA', kind='distance', distance=1.0,
units='kmer_jaccard')
assert 3 == len(query_result)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
assert "isin_kmer_jaccard('SampleA', dist=1.0, k=31)" == query_result.query_expression()
# test with unknowns in query
query_result = query(loaded_database_connection).hasa('reference:5061:G:A').isin(
'SampleA', kind='distance', distance=1.0, units='kmer_jaccard')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
assert "reference:5061:G:A AND isin_kmer_jaccard('SampleA', dist=1.0, k=31)" == query_result.query_expression()
# test with unknowns in query, isin should remove unknowns
query_result = query(loaded_database_connection).hasa('reference:5061:G:A').isin(
'SampleB', kind='distance', distance=0, units='kmer_jaccard')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
assert "reference:5061:G:A AND isin_kmer_jaccard('SampleB', dist=0, k=31)" == query_result.query_expression()
def test_query_isin_kmer_samples_query(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_result_A = query(loaded_database_connection).isa('SampleA')
query_result = query(loaded_database_connection).isin(query_result_A, kind='distance', distance=1.0,
units='kmer_jaccard')
assert 3 == len(query_result)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
assert "isin_kmer_jaccard(<SamplesQueryIndex[selected=11% (1/9) samples, " \
"unknown=0% (0/9) samples]>, dist=1.0, k=31)" == query_result.query_expression()
def test_query_isin_kmer_samples_set(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
sample_set_a = SampleSet([sampleA.id])
query_result = query(loaded_database_connection).isin(sample_set_a, kind='distance', distance=1.0,
units='kmer_jaccard')
assert 3 == len(query_result)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
assert "isin_kmer_jaccard(set(1 samples), dist=1.0, k=31)" == query_result.query_expression()
def test_query_isin_kmer_samples_query_no_matches(loaded_database_connection: DataIndexConnection):
query_result_empty = query(loaded_database_connection).isa('no_exist')
query_result = query(loaded_database_connection).isin(query_result_empty, kind='distance', distance=1.0,
units='kmer_jaccard')
assert 0 == len(query_result)
assert 9 == len(query_result.universe_set)
assert "isin_kmer_jaccard(<SamplesQueryIndex[selected=0% (0/9) samples, " \
"unknown=0% (0/9) samples]>, dist=1.0, k=31)" == query_result.query_expression()
def test_query_within_kmer_default(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_result = query(loaded_database_connection).within('SampleA', distance=1.0)
assert 3 == len(query_result)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
assert "isin_kmer_jaccard('SampleA', dist=1.0, k=31)" == query_result.query_expression()
def test_query_within_invalid_unit_with_no_tree(loaded_database_connection: DataIndexConnection):
with pytest.raises(Exception) as execinfo:
query(loaded_database_connection).within('SampleA', distance=1.0,
units='substitutions')
assert 'units=[substitutions] is not supported' in str(execinfo.value)
def test_to_distances_kmer(loaded_database_connection: DataIndexConnection):
query_result = query(loaded_database_connection).isin(['SampleA', 'SampleB', 'SampleC'], kind='samples')
results_d, labels = query_result.to_distances(kind='kmer')
assert (3, 3) == results_d.shape
assert {'SampleA', 'SampleB', 'SampleC'} == set(labels)
    label_idx = {element: idx for idx, element in enumerate(labels)}
    assert math.isclose(results_d[label_idx['SampleA']][label_idx['SampleA']], 0, rel_tol=1e-3)
    assert math.isclose(results_d[label_idx['SampleA']][label_idx['SampleB']], 0.522, rel_tol=1e-3)
    assert math.isclose(results_d[label_idx['SampleA']][label_idx['SampleC']], 0.5, rel_tol=1e-3)
    assert math.isclose(results_d[label_idx['SampleB']][label_idx['SampleA']], 0.522, rel_tol=1e-3)
    assert math.isclose(results_d[label_idx['SampleB']][label_idx['SampleB']], 0, rel_tol=1e-3)
    assert math.isclose(results_d[label_idx['SampleB']][label_idx['SampleC']], 0.3186, rel_tol=1e-3)
    assert math.isclose(results_d[label_idx['SampleC']][label_idx['SampleA']], 0.5, rel_tol=1e-3)
    assert math.isclose(results_d[label_idx['SampleC']][label_idx['SampleB']], 0.3186, rel_tol=1e-3)
    assert math.isclose(results_d[label_idx['SampleC']][label_idx['SampleC']], 0, rel_tol=1e-3)
def test_query_isin_kmer_2_matches(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_result = query(loaded_database_connection).isin('SampleA', kind='distances', distance=0.5,
units='kmer_jaccard')
assert 2 == len(query_result)
assert {sampleA.id, sampleC.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_isin_kmer_1_match(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
query_result = query(loaded_database_connection).isin('SampleA', kind='distance', distance=0.49,
units='kmer_jaccard')
assert 1 == len(query_result)
assert {sampleA.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
assert "isin_kmer_jaccard('SampleA', dist=0.49, k=31)" == query_result.query_expression()
def test_query_single_mutation_serial_processing(loaded_database_connection: DataIndexConnection):
do_test_query_single_mutation(loaded_database_connection)
def test_query_single_mutation_parallel_processing(loaded_database_connection_parallel_variants: DataIndexConnection):
do_test_query_single_mutation(loaded_database_connection_parallel_variants)
def do_test_query_single_mutation(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
all_sample_ids = {i for i, in db.get_session().query(Sample.id).all()}
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:5061:G:A'))
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id} - {sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
assert not query_result.has_tree()
def test_query_single_mutation_then_add_new_genomes_and_query(loaded_database_connection: DataIndexConnection,
snippy_data_package_2: NucleotideSampleDataPackage):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
all_sample_ids = {i for i, in db.get_session().query(Sample.id).all()}
assert 9 == len(all_sample_ids)
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:5061:G:A'))
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id} - {sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Insert new data which should increase the number of matches I get
loaded_database_connection.variation_service.insert(data_package=snippy_data_package_2,
feature_scope_name='genome')
sampleA_2 = db.get_session().query(Sample).filter(Sample.name == 'SampleA_2').one()
sampleB_2 = db.get_session().query(Sample).filter(Sample.name == 'SampleB_2').one()
all_sample_ids = {i for i, in db.get_session().query(Sample.id).all()}
assert 12 == len(all_sample_ids)
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:5061:G:A'))
assert 2 == len(query_result)
assert {sampleB.id, sampleB_2.id} == set(query_result.sample_set)
assert {sampleA.id, sampleA_2.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id, sampleA_2.id, sampleB.id, sampleB_2.id} == set(query_result.absent_set)
assert 12 == len(query_result.universe_set)
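# A query result partitions the universe into three disjoint sets (sample_set for
# present samples, unknown_set and absent_set), so their sizes sum to len(universe_set)
# (1 + 1 + 7 == 9 below); select_unknown()/select_absent()/select_present() promote the
# chosen partition into the sample_set of a new result.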
def test_select_unknown_absent_present(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
all_sample_ids = {i for i, in db.get_session().query(Sample.id).all()}
query_result = query(loaded_database_connection).hasa('reference:5061:G:A')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert 7 == len(query_result.absent_set)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Select unknown
selected_result = query_result.select_unknown()
assert 1 == len(selected_result)
assert {sampleA.id} == set(selected_result.sample_set)
assert 0 == len(selected_result.unknown_set)
assert 8 == len(selected_result.absent_set)
assert all_sample_ids - {sampleA.id} == set(selected_result.absent_set)
assert 9 == len(selected_result.universe_set)
# Select absent
selected_result = query_result.select_absent()
assert 7 == len(selected_result)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(selected_result.sample_set)
assert 0 == len(selected_result.unknown_set)
assert 2 == len(selected_result.absent_set)
assert {sampleA.id, sampleB.id} == set(selected_result.absent_set)
assert 9 == len(selected_result.universe_set)
# Select present
selected_result = query_result.select_present()
assert 1 == len(selected_result)
assert {sampleB.id} == set(selected_result.sample_set)
assert 0 == len(selected_result.unknown_set)
assert 8 == len(selected_result.absent_set)
assert all_sample_ids - {sampleB.id} == set(selected_result.absent_set)
assert 9 == len(selected_result.universe_set)
def test_query_multiple_mutation_unknowns(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
all_sample_ids = {i for i, in db.get_session().query(Sample.id).all()}
# Initial query object
query_result = query(loaded_database_connection)
assert 9 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
assert 0 == len(query_result.absent_set)
# Test order 'reference:5061:G:A' then 'reference:190:A:G'
query_result = query(loaded_database_connection).hasa('reference:5061:G:A')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
query_result = query_result.hasa('reference:190:A:G')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Test order 'reference:190:A:G' then 'reference:5061:G:A'
query_result = query(loaded_database_connection).hasa('reference:190:A:G')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id, sampleC.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id, sampleB.id, sampleC.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
query_result = query_result.hasa('reference:5061:G:A')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Reset query
query_result = query(loaded_database_connection).hasa('reference:190:A:G')
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id, sampleC.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id, sampleB.id, sampleC.id} == set(query_result.absent_set)
# Empty query with no unknowns
query_result_empty = query_result.hasa('reference:800:1:G')
assert 0 == len(query_result_empty)
assert set() == set(query_result_empty.sample_set)
assert 0 == len(query_result_empty.unknown_set)
assert 9 == len(query_result_empty.universe_set)
assert 9 == len(query_result_empty.absent_set)
# Empty query with all unknowns
query_result_unknown = query_result.hasa('reference:5160:1:G')
assert 0 == len(query_result_unknown)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result_unknown.unknown_set)
assert 9 == len(query_result_unknown.universe_set)
assert all_sample_ids - {sampleA.id, sampleB.id, sampleC.id} == set(query_result.absent_set)
# Query for A,B then for mutation with B and verify A ends up in unknowns
query_result = query(loaded_database_connection).isin(['SampleA', 'SampleB'])
assert 2 == len(query_result)
assert {sampleA.id, sampleB.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result.absent_set)
query_result = query_result.hasa('reference:5061:G:A')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result.absent_set)
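# intersect() restricts both the present set and the unknown set to the supplied
# SampleSet, leaving the universe unchanged; samples in the supplied set that are not
# part of the query result are not added.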
def test_query_intersect_sample_set(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_result = query(loaded_database_connection).hasa('reference:5061:G:A')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
# Test intersect exclude unknown
query_intersect = query_result.intersect(SampleSet(sample_ids=[sampleB.id]))
assert 1 == len(query_intersect)
assert {sampleB.id} == set(query_intersect.sample_set)
assert 0 == len(query_intersect.unknown_set)
assert 9 == len(query_intersect.universe_set)
# Test intersect include unknown
query_intersect = query_result.intersect(SampleSet(sample_ids=[sampleA.id, sampleB.id]))
assert 1 == len(query_intersect)
assert {sampleB.id} == set(query_intersect.sample_set)
assert 1 == len(query_intersect.unknown_set)
assert {sampleA.id} == set(query_intersect.unknown_set)
assert 9 == len(query_intersect.universe_set)
# Test intersect include unknown and sample not in query
query_intersect = query_result.intersect(SampleSet(sample_ids=[sampleA.id, sampleB.id, sampleC.id]))
assert 1 == len(query_intersect)
assert {sampleB.id} == set(query_intersect.sample_set)
assert 1 == len(query_intersect.unknown_set)
assert {sampleA.id} == set(query_intersect.unknown_set)
assert 9 == len(query_intersect.universe_set)
# Test intersect only unknown and sample not in query
query_intersect = query_result.intersect(SampleSet(sample_ids=[sampleA.id, sampleC.id]))
assert 0 == len(query_intersect)
assert 1 == len(query_intersect.unknown_set)
assert {sampleA.id} == set(query_intersect.unknown_set)
assert 9 == len(query_intersect.universe_set)
# Test intersect only unknown
query_intersect = query_result.intersect(SampleSet(sample_ids=[sampleA.id]))
assert 0 == len(query_intersect)
assert 1 == len(query_intersect.unknown_set)
assert {sampleA.id} == set(query_intersect.unknown_set)
assert 9 == len(query_intersect.universe_set)
def test_query_mutation_hgvs(loaded_database_connection_annotations: DataIndexConnection):
db = loaded_database_connection_annotations.database
sample_sh14_001 = db.get_session().query(Sample).filter(Sample.name == 'SH14-001').one()
sample_sh14_014 = db.get_session().query(Sample).filter(Sample.name == 'SH14-014').one()
sample_sh10_014 = db.get_session().query(Sample).filter(Sample.name == 'SH10-014').one()
    # hgvs p (protein)
## Test using QueryFeature object
query_result = query(loaded_database_connection_annotations).hasa(
QueryFeatureHGVS.create_from_id('hgvs:NC_011083:SEHA_RS04550:p.Ile224fs'))
assert 2 == len(query_result)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result.sample_set)
assert 3 == len(query_result.universe_set)
assert not query_result.has_tree()
## Test using string
query_result = query(loaded_database_connection_annotations).hasa('hgvs:NC_011083:SEHA_RS04550:p.Ile224fs')
assert 2 == len(query_result)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result.sample_set)
assert 3 == len(query_result.universe_set)
assert not query_result.has_tree()
    # hgvs c (coding nucleotide)
## Test using QueryFeature object
query_result = query(loaded_database_connection_annotations).hasa(
QueryFeatureHGVS.create_from_id('hgvs:NC_011083:SEHA_RS04550:c.670dupA'))
assert 2 == len(query_result)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result.sample_set)
assert 3 == len(query_result.universe_set)
assert not query_result.has_tree()
## Test using string
query_result = query(loaded_database_connection_annotations).hasa('hgvs:NC_011083:SEHA_RS04550:c.670dupA')
assert 2 == len(query_result)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result.sample_set)
assert 3 == len(query_result.universe_set)
assert not query_result.has_tree()
# hgvs n (nucleotide)
## Test using QueryFeature object
query_result = query(loaded_database_connection_annotations).hasa(
QueryFeatureHGVS.create_from_id('hgvs:NC_011083:n.882634G>A'))
assert 1 == len(query_result)
assert {sample_sh10_014.id} == set(query_result.sample_set)
assert 3 == len(query_result.universe_set)
assert not query_result.has_tree()
## Test using string
query_result = query(loaded_database_connection_annotations).hasa('hgvs:NC_011083:n.882634G>A')
assert 1 == len(query_result)
assert {sample_sh10_014.id} == set(query_result.sample_set)
assert 3 == len(query_result.universe_set)
assert not query_result.has_tree()
# Test find no results
query_result = query(loaded_database_connection_annotations).hasa(
QueryFeatureHGVS.create_from_id('hgvs:NC_011083:SEHA_RS04550:p.none'))
assert 0 == len(query_result)
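# complement() (and the ~ operator) swaps the present and absent sets while leaving the
# unknown set untouched, as the assertions below demonstrate.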
def test_query_single_mutation_complement(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
all_sample_ids = {i for i, in db.get_session().query(Sample.id).all()}
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:5061:G:A'))
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert {sampleA.id} == set(query_result.unknown_set)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
query_result_c = query_result.complement()
assert 7 == len(query_result_c)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result_c.sample_set)
assert {sampleA.id} == set(query_result_c.unknown_set)
assert {sampleB.id} == set(query_result_c.absent_set)
assert 9 == len(query_result_c.universe_set)
query_result_c = ~query_result
assert 7 == len(query_result_c)
assert all_sample_ids - {sampleA.id, sampleB.id} == set(query_result_c.sample_set)
assert {sampleA.id} == set(query_result_c.unknown_set)
assert {sampleB.id} == set(query_result_c.absent_set)
assert 9 == len(query_result_c.universe_set)
def test_query_single_mutation_two_samples_complement(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
all_sample_ids = {i for i, in db.get_session().query(Sample.id).all()}
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:839:C:G'))
assert 2 == len(query_result)
assert {sampleB.id, sampleC.id} == set(query_result.sample_set)
assert set() == set(query_result.unknown_set)
assert all_sample_ids - {sampleB.id, sampleC.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
query_result_c = query_result.complement()
assert 7 == len(query_result_c)
assert all_sample_ids - {sampleB.id, sampleC.id} == set(query_result_c.sample_set)
assert set() == set(query_result_c.unknown_set)
assert {sampleB.id, sampleC.id} == set(query_result_c.absent_set)
assert 9 == len(query_result_c.universe_set)
assert sampleB.id not in query_result_c.sample_set
assert sampleC.id not in query_result_c.sample_set
assert sampleA.id in query_result_c.sample_set
query_result_c = ~query_result
assert 7 == len(query_result_c)
assert all_sample_ids - {sampleB.id, sampleC.id} == set(query_result_c.sample_set)
assert set() == set(query_result_c.unknown_set)
assert {sampleB.id, sampleC.id} == set(query_result_c.absent_set)
assert 9 == len(query_result_c.universe_set)
assert sampleB.id not in query_result_c.sample_set
assert sampleC.id not in query_result_c.sample_set
assert sampleA.id in query_result_c.sample_set
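# summary() reports Present/Absent/Unknown counts for the query along with Total and
# the matching percentage columns, where each percentage is 100 * count / Total
# (for example, % Present = 100 * 1 / 9 here).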
def test_query_single_mutation_summary(loaded_database_connection: DataIndexConnection):
df = query(loaded_database_connection).hasa('reference:5061:G:A', kind='mutation').summary()
assert 1 == len(df)
assert ['Query', 'Present', 'Absent', 'Unknown', 'Total',
'% Present', '% Absent', '% Unknown'] == df.columns.tolist()
assert 'reference:5061:G:A' == df.iloc[0]['Query']
assert 1 == df.iloc[0]['Present']
assert 7 == df.iloc[0]['Absent']
assert 1 == df.iloc[0]['Unknown']
assert 9 == df.iloc[0]['Total']
assert math.isclose((1 / 9) * 100, df.iloc[0]['% Present'])
assert math.isclose((7 / 9) * 100, df.iloc[0]['% Absent'])
assert math.isclose((1 / 9) * 100, df.iloc[0]['% Unknown'])
def test_query_single_mutation_two_samples(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:839:C:G'))
assert 2 == len(query_result)
assert {sampleB.id, sampleC.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_single_mutation_two_samples_kmer_one_sample(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:839:C:G'))
assert 2 == len(query_result)
assert {sampleB.id, sampleC.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
query_result = query_result.isin('SampleA', kind='distance', distance=0.5,
units='kmer_jaccard')
assert 1 == len(query_result)
assert {sampleC.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_hasa_string_features(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
# Test default hasa SPDI
query_result = query(loaded_database_connection).hasa('reference:839:C:G')
assert 2 == len(query_result)
assert {sampleB.id, sampleC.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
# Test HGVS (should return no results since snpeff annotations don't exist)
query_result = query(loaded_database_connection).hasa('hgvs:reference:n.839C>G')
assert 0 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.universe_set)
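# Feature identifiers in these tests come in two styles: SPDI-like
# 'sequence:position:ref:alt', where the deleted reference may be written as a sequence
# or as its integer length, and HGVS-like 'hgvs:sequence[:gene]:change' using
# p. (protein), c. (coding nucleotide) or n. (genomic nucleotide) notation.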
def test_query_hasa_string_features_snpeff(loaded_database_connection_annotations: DataIndexConnection):
db = loaded_database_connection_annotations.database
sample_sh14_001 = db.get_session().query(Sample).filter(Sample.name == 'SH14-001').one()
sample_sh14_014 = db.get_session().query(Sample).filter(Sample.name == 'SH14-014').one()
sample_sh10_014 = db.get_session().query(Sample).filter(Sample.name == 'SH10-014').one()
query_result = query(loaded_database_connection_annotations)
# Test HGVS with amino acid notation
query_result_test = query_result.hasa('hgvs:NC_011083:SEHA_RS04550:p.Ile224fs')
assert 2 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVS nucleotide coding notation
query_result_test = query_result.hasa('hgvs:NC_011083:SEHA_RS04550:c.670dupA')
assert 2 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test SPDI deletion sequence
query_result_test = query_result.hasa('NC_011083:835147:C:CA')
assert 2 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test SPDI deletion integer
query_result_test = query_result.hasa('NC_011083:835147:1:CA')
assert 2 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVS, intergenic region
query_result_test = query_result.hasa('hgvs:NC_011083:n.298943A>T')
assert 3 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id, sample_sh10_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test SPDI, sequence and intergenic region
query_result_test = query_result.hasa('NC_011083:298943:A:T')
assert 3 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id, sample_sh10_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test SPDI, deletion integer and intergenic region
query_result_test = query_result.hasa('NC_011083:298943:1:T')
assert 3 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id, sample_sh10_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVS large deletion with amino acid notation
query_result_test = query_result.hasa('hgvs:NC_011083:SEHA_RS15905:p.Asp140_His155del')
assert 1 == len(query_result_test)
assert {sample_sh10_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVS large deletion with nucleotide coding notation
query_result_test = query_result.hasa(
'hgvs:NC_011083:SEHA_RS15905:c.417_464delCGACCACGACCACGACCACGACCACGACCACGACCACGACCACGACCA')
assert 1 == len(query_result_test)
assert {sample_sh10_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test SPDI, large deletion sequence
query_result_test = query_result.hasa('NC_011083:3167187:AACCACGACCACGACCACGACCACGACCACGACCACGACCACGACCACG:A')
assert 1 == len(query_result_test)
assert {sample_sh10_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test SPDI, large deletion integer
query_result_test = query_result.hasa(
f'NC_011083:3167187:{len("AACCACGACCACGACCACGACCACGACCACGACCACGACCACGACCACG")}:A')
assert 1 == len(query_result_test)
assert {sample_sh10_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVS smaller deletion in same region with amino acid notation
query_result_test = query_result.hasa('hgvs:NC_011083:SEHA_RS15905:p.Asp144_His155del')
assert 2 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVS smaller deletion in same region with nucleotide coding notation
query_result_test = query_result.hasa(
'hgvs:NC_011083:SEHA_RS15905:c.429_464delCGACCACGACCACGACCACGACCACGACCACGACCA')
assert 2 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test SPDI, smaller deletion in same region sequence
query_result_test = query_result.hasa('NC_011083:3167187:AACCACGACCACGACCACGACCACGACCACGACCACG:A')
assert 2 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test SPDI, smaller deletion in same region integer
query_result_test = query_result.hasa(f'NC_011083:3167187:{len("AACCACGACCACGACCACGACCACGACCACGACCACG")}:A')
assert 2 == len(query_result_test)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVSGN.c single mutation
query_result_test = query_result.hasa('hgvs_gn:NC_011083:murF:c.497C>A')
assert 3 == len(query_result_test)
assert {sample_sh10_014.id, sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVSGN.p single mutation
query_result_test = query_result.hasa('hgvs_gn:NC_011083:murF:p.Ala166Glu')
assert 3 == len(query_result_test)
assert {sample_sh10_014.id, sample_sh14_001.id, sample_sh14_014.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVSGN.c single mutation 2 results
query_result_test = query_result.hasa('hgvs_gn:NC_011083:oadA:c.609T>C')
assert 2 == len(query_result_test)
assert {sample_sh10_014.id, sample_sh14_001.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVSGN.p single mutation 2 results
query_result_test = query_result.hasa('hgvs_gn:NC_011083:oadA:p.Cys203Cys')
assert 2 == len(query_result_test)
assert {sample_sh10_014.id, sample_sh14_001.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVS.c single mutation 2 results
query_result_test = query_result.hasa('hgvs:NC_011083:SEHA_RS17780:c.609T>C')
assert 2 == len(query_result_test)
assert {sample_sh10_014.id, sample_sh14_001.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test HGVS.p single mutation 2 results
query_result_test = query_result.hasa('hgvs:NC_011083:SEHA_RS17780:p.Cys203Cys')
assert 2 == len(query_result_test)
assert {sample_sh10_014.id, sample_sh14_001.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
# Test equivalent SPDI identifier for above
query_result_test = query_result.hasa('NC_011083:3535635:A:G')
assert 2 == len(query_result_test)
assert {sample_sh10_014.id, sample_sh14_001.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
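# The assertions above exercise equivalent spellings accepted by hasa() on an
# snpeff-annotated index: SPDI with either the deleted sequence or its integer length,
# 'hgvs:' identifiers keyed by locus tag (e.g. SEHA_RS17780) in p./c./n. notation, and
# 'hgvs_gn:' identifiers keyed by gene name (e.g. murF, oadA). Equivalent forms select
# the same sample sets, as the matching SPDI query at the end confirms.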
def test_query_hasa_string_features_snpeff_duplicate_genes(
loaded_database_connection_annotations_duplicate_genes: DataIndexConnection):
db = loaded_database_connection_annotations_duplicate_genes.database
sample1 = db.get_session().query(Sample).filter(Sample.name == 'SH10-014-dup-gene-variant').one()
sample2 = db.get_session().query(Sample).filter(Sample.name == 'SH10-014-dup-gene-variant-2').one()
query_result = query(loaded_database_connection_annotations_duplicate_genes)
# Test HGVSGN.c single mutation; two different copies of the murF gene exist, so the query should match both
query_result_test = query_result.hasa('hgvs_gn:NC_011083:murF:c.497C>A')
assert 2 == len(query_result_test)
assert {sample1.id, sample2.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 2 == len(query_result_test.universe_set)
# Test HGVSGN.p single mutation; two different copies of the murF gene exist, so the query should match both
query_result_test = query_result.hasa('hgvs_gn:NC_011083:murF:p.Ala166Glu')
assert 2 == len(query_result_test)
assert {sample1.id, sample2.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 2 == len(query_result_test.universe_set)
# Test HGVS.c single mutation; selecting by the unique locus identifier should give 1 result
query_result_test = query_result.hasa('hgvs:NC_011083:SEHA_RS01180:c.497C>A')
assert 1 == len(query_result_test)
assert {sample1.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 2 == len(query_result_test.universe_set)
# Test HGVS.p single mutation; selecting by the unique locus identifier should give 1 result
query_result_test = query_result.hasa('hgvs:NC_011083:SEHA_RS01180:p.Ala166Glu')
assert 1 == len(query_result_test)
assert {sample1.id} == set(query_result_test.sample_set)
assert 0 == len(query_result_test.unknown_set)
assert 2 == len(query_result_test.universe_set)
def test_query_single_mutation_no_results_is_empty(loaded_database_connection: DataIndexConnection):
# Test is_empty for something with unknown positions
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:1:1:A'))
assert 0 == len(query_result)
assert query_result.is_empty()
assert 9 == len(query_result.universe_set)
assert not query_result.is_empty(include_unknown=True)
# Test is_empty for something without unknown positions
query_result = query(loaded_database_connection).hasa(QueryFeatureMutationSPDI('reference:3000:1:A'))
assert 0 == len(query_result)
assert query_result.is_empty()
assert 9 == len(query_result.universe_set)
assert query_result.is_empty(include_unknown=True)
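# Note: is_empty() reports whether any samples are present; with include_unknown=True it
# also considers unknown samples, so the first query above (which only hits unknown
# positions) is empty of present samples but not empty overall, while the second is
# empty either way.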
def test_query_chained_mutation(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result = query(loaded_database_connection).hasa(
QueryFeatureMutationSPDI('reference:839:C:G')).hasa(
QueryFeatureMutationSPDI('reference:5061:G:A'))
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_chained_mutation_has_mutation(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result = query(loaded_database_connection).hasa(
'reference:839:C:G', kind='mutation').hasa(
'reference:5061:G:A', kind='mutation')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_mlst_allele(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sample_CFSAN002349 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN002349').one()
sample_CFSAN023463 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN023463').one()
sample_2014D_0067 = db.get_session().query(Sample).filter(Sample.name == '2014D-0067').one()
sample_2014D_0068 = db.get_session().query(Sample).filter(Sample.name == '2014D-0068').one()
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
# No unknowns
query_result = query(loaded_database_connection).hasa(QueryFeatureMLST('mlst:lmonocytogenes:abcZ:1'))
assert 5 == len(query_result)
assert {sample_CFSAN002349.id, sample_CFSAN023463.id, sampleA.id, sampleB.id, sampleC.id} == set(
query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 4 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
assert {'CFSAN002349', 'CFSAN023463', 'SampleA', 'SampleB', 'SampleC'} == set(query_result.tolist())
assert {sample_CFSAN002349.id, sample_CFSAN023463.id, sampleA.id, sampleB.id, sampleC.id} == set(
query_result.tolist(names=False))
# With unknown and present
query_result = query(loaded_database_connection).hasa(QueryFeatureMLST('mlst:campylobacter:uncA:6'))
assert 1 == len(query_result)
assert {sample_2014D_0068.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sample_2014D_0067.id} == set(query_result.unknown_set)
assert 7 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# With unknown and absent
query_result = query(loaded_database_connection).hasa(QueryFeatureMLST('mlst:campylobacter:uncA:5'))
assert 0 == len(query_result)
assert 1 == len(query_result.unknown_set)
assert {sample_2014D_0067.id} == set(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Direct from string
query_result = query(loaded_database_connection).hasa('mlst:campylobacter:uncA:6')
assert 1 == len(query_result)
assert {sample_2014D_0068.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sample_2014D_0067.id} == set(query_result.unknown_set)
assert 7 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
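# Illustrative sketch (not part of the original suite): the assertions above repeatedly
# check the same four-way partition of a query result. A small helper along these lines
# (the name _partition_counts is hypothetical) could express that check, using only
# attributes already exercised in these tests (len(), unknown_set, absent_set,
# universe_set).
def _partition_counts(result):
    """Return the present/unknown/absent/universe sizes of a samples query result."""
    return {
        'present': len(result),
        'unknown': len(result.unknown_set),
        'absent': len(result.absent_set),
        'universe': len(result.universe_set),
    }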
def test_query_chained_mlst_alleles(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sample1 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN002349').one()
query_result = query(loaded_database_connection).hasa(
QueryFeatureMLST('mlst:lmonocytogenes:abcZ:1')).hasa(
QueryFeatureMLST('mlst:lmonocytogenes:lhkA:4'))
assert 1 == len(query_result)
assert {sample1.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_chained_mlst_alleles_has_allele(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sample1 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN002349').one()
query_result = query(loaded_database_connection) \
.hasa('mlst:lmonocytogenes:abcZ:1', kind='mlst') \
.hasa('mlst:lmonocytogenes:lhkA:4', kind='mlst')
assert 1 == len(query_result)
assert {sample1.id} == set(query_result.sample_set)
assert 9 == len(query_result.universe_set)
def test_query_chained_mlst_nucleotide(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
all_sample_ids = {i for i, in db.get_session().query(Sample.id).all()}
# Test query mutation then MLST
query_result = query(loaded_database_connection) \
.hasa('reference:839:C:G', kind='mutation') \
.hasa('mlst:lmonocytogenes:cat:12', kind='mlst')
assert 1 == len(query_result)
assert {sampleC.id} == set(query_result.sample_set)
assert 0 == len(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert all_sample_ids - {sampleC.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
assert ['SampleC'] == query_result.tolist()
assert [sampleC.id] == query_result.tolist(names=False)
# Test query MLST then mutation that will be switched to unknown
query_result = query(loaded_database_connection) \
.hasa('mlst:lmonocytogenes:bglA:52', kind='mlst') \
.hasa('reference:3319:1:G', kind='mutation')
assert 0 == len(query_result)
assert 1 == len(query_result.unknown_set)
assert {sampleB.id} == set(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert all_sample_ids - {sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Test query MLST (with unknown allele) then mutation that will be switched to unknown
query_result = query(loaded_database_connection) \
.hasa('mlst:lmonocytogenes:ldh:5', kind='mlst') \
.hasa('reference:1:1:G', kind='mutation')
assert 0 == len(query_result)
assert 3 == len(query_result.unknown_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result.unknown_set)
assert 6 == len(query_result.absent_set)
assert all_sample_ids - {sampleA.id, sampleB.id, sampleC.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Test the unknown allele of MLST with a mutation that will be switched to unknown
query_result = query(loaded_database_connection) \
.hasa('mlst:lmonocytogenes:ldh:?', kind='mlst') \
.hasa('reference:3319:1:G', kind='mutation')
assert 0 == len(query_result)
assert 1 == len(query_result.unknown_set)
assert {sampleB.id} == set(query_result.unknown_set)
assert 8 == len(query_result.absent_set)
assert all_sample_ids - {sampleB.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Test query MLST (with unknown allele) then mutation (no issues with unknown/found overlap)
query_result = query(loaded_database_connection) \
.hasa('mlst:lmonocytogenes:ldh:5', kind='mlst') \
.hasa('reference:839:C:G', kind='mutation')
assert 1 == len(query_result)
assert {sampleC.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sampleB.id} == set(query_result.unknown_set)
assert 7 == len(query_result.absent_set)
assert all_sample_ids - {sampleB.id, sampleC.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
# Test query mutation (no issues with unknown/found overlap) then MLST (with unknown allele)
query_result = query(loaded_database_connection) \
.hasa('reference:839:C:G', kind='mutation') \
.hasa('mlst:lmonocytogenes:ldh:5', kind='mlst')
assert 1 == len(query_result)
assert {sampleC.id} == set(query_result.sample_set)
assert 1 == len(query_result.unknown_set)
assert {sampleB.id} == set(query_result.unknown_set)
assert 7 == len(query_result.absent_set)
assert all_sample_ids - {sampleB.id, sampleC.id} == set(query_result.absent_set)
assert 9 == len(query_result.universe_set)
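# Note on chaining: each hasa() call intersects with the current selection, and a sample
# that matched the earlier features but is unknown at a later one moves to unknown_set
# rather than sample_set, as the MLST-then-mutation cases above assert.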
def test_query_single_mutation_dataframe(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
# Case with no unknowns
df = query(loaded_database_connection).hasa('reference:839:C:G', kind='mutation').toframe()
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert [sampleB.id, sampleC.id] == df['Sample ID'].tolist()
assert {'reference:839:C:G'} == set(df['Query'].tolist())
assert ['Present', 'Present'] == df['Status'].tolist()
# Case with no unknowns, exclude present
df = query(loaded_database_connection).hasa('reference:839:C:G', kind='mutation').toframe(include_present=False)
assert 0 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
# Case with some unknowns
df = query(loaded_database_connection).hasa('reference:5061:G:A', kind='mutation').toframe()
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleB'] == df['Sample Name'].tolist()
assert [sampleB.id] == df['Sample ID'].tolist()
assert {'reference:5061:G:A'} == set(df['Query'].tolist())
assert ['Present'] == df['Status'].tolist()
# Case with some unknowns, exclude present
df = query(loaded_database_connection).hasa('reference:5061:G:A', kind='mutation').toframe(include_present=False)
assert 0 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
def test_query_single_mutation_dataframe_include_all(loaded_database_connection: DataIndexConnection):
# Case of no unknowns
df = query(loaded_database_connection).hasa(
'reference:839:C:G', kind='mutation').toframe(include_absent=True, include_unknown=True)
assert 9 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463',
'SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Absent', 'Absent', 'Absent', 'Absent',
'Absent', 'Absent',
'Absent', 'Present', 'Present'] == df['Status'].tolist()
assert {'reference:839:C:G'} == set(df['Query'].tolist())
# Case of no unknowns but 'include_unknown' is False
df = query(loaded_database_connection).hasa(
'reference:839:C:G', kind='mutation').toframe(include_absent=True, include_unknown=False)
assert 9 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463',
'SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Absent', 'Absent', 'Absent', 'Absent',
'Absent', 'Absent',
'Absent', 'Present', 'Present'] == df['Status'].tolist()
assert {'reference:839:C:G'} == set(df['Query'].tolist())
# Case of some unknowns
df = query(loaded_database_connection).hasa(
'reference:5061:G:A', kind='mutation').toframe(include_absent=True, include_unknown=True)
assert 9 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463',
'SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Absent', 'Absent', 'Absent', 'Absent',
'Absent', 'Absent',
'Unknown', 'Present', 'Absent'] == df['Status'].tolist()
assert {'reference:5061:G:A'} == set(df['Query'].tolist())
# Case of some unknowns excluding unknowns
df = query(loaded_database_connection).hasa(
'reference:5061:G:A', kind='mutation').toframe(include_absent=True, include_unknown=False)
assert 8 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463',
'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Absent', 'Absent', 'Absent', 'Absent',
'Absent', 'Absent',
'Present', 'Absent'] == df['Status'].tolist()
assert {'reference:5061:G:A'} == set(df['Query'].tolist())
# Case of some unknowns, excluding absent
df = query(loaded_database_connection).hasa(
'reference:5061:G:A', kind='mutation').toframe(include_absent=False, include_unknown=True)
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert {'reference:5061:G:A'} == set(df['Query'].tolist())
# Case of some unknowns, only unknowns
df = query(loaded_database_connection).hasa(
'reference:5061:G:A', kind='mutation').toframe(include_present=False,
include_absent=False, include_unknown=True)
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleA'] == df['Sample Name'].tolist()
assert ['Unknown'] == df['Status'].tolist()
assert {'reference:5061:G:A'} == set(df['Query'].tolist())
# Case of some unknowns, only absent
df = query(loaded_database_connection).hasa(
'reference:5061:G:A', kind='mutation').toframe(include_present=False, include_absent=True,
include_unknown=False)
assert 7 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
'CFSAN002349', 'CFSAN023463',
'SampleC'] == df['Sample Name'].tolist()
assert ['Absent', 'Absent', 'Absent', 'Absent',
'Absent', 'Absent',
'Absent'] == df['Status'].tolist()
assert {'reference:5061:G:A'} == set(df['Query'].tolist())
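# Note: toframe() defaults to present samples only; include_absent=True and
# include_unknown=True add rows with Status 'Absent'/'Unknown', and include_present=False
# drops the present rows, as the flag combinations asserted above show.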
def test_query_chained_allele_dataframe(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sample1 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN002349').one()
df = query(loaded_database_connection) \
.hasa('mlst:lmonocytogenes:abcZ:1', kind='mlst') \
.hasa('mlst:lmonocytogenes:lhkA:4', kind='mlst').toframe()
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['CFSAN002349'] == df['Sample Name'].tolist()
assert [sample1.id] == df['Sample ID'].tolist()
assert {'mlst:lmonocytogenes:abcZ:1 AND mlst:lmonocytogenes:lhkA:4'} == set(df['Query'].tolist())
def test_query_single_mutation_no_results_toframe(loaded_database_connection: DataIndexConnection):
df = query(loaded_database_connection).hasa('reference:1:1:A', kind='mutation').toframe()
assert 0 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
def test_query_single_mutation_all_unknown_summary(loaded_database_connection: DataIndexConnection):
df = query(loaded_database_connection).hasa('reference:1:1:A', kind='mutation').summary()
assert 1 == len(df)
assert ['Query', 'Present', 'Absent', 'Unknown', 'Total',
'% Present', '% Absent', '% Unknown'] == df.columns.tolist()
assert 'reference:1:1:A' == df.iloc[0]['Query']
assert 0 == df.iloc[0]['Present']
assert 6 == df.iloc[0]['Absent']
assert 3 == df.iloc[0]['Unknown']
assert 9 == df.iloc[0]['Total']
assert math.isclose((0 / 9) * 100, df.iloc[0]['% Present'])
assert math.isclose((6 / 9) * 100, df.iloc[0]['% Absent'])
assert math.isclose((3 / 9) * 100, df.iloc[0]['% Unknown'])
def test_query_single_mutation_all_absent_summary(loaded_database_connection: DataIndexConnection):
df = query(loaded_database_connection).hasa('reference:3000:1:A', kind='mutation').summary()
assert 1 == len(df)
assert ['Query', 'Present', 'Absent', 'Unknown', 'Total',
'% Present', '% Absent', '% Unknown'] == df.columns.tolist()
assert 'reference:3000:1:A' == df.iloc[0]['Query']
assert 0 == df.iloc[0]['Present']
assert 9 == df.iloc[0]['Absent']
assert 0 == df.iloc[0]['Unknown']
assert 9 == df.iloc[0]['Total']
assert math.isclose((0 / 9) * 100, df.iloc[0]['% Present'])
assert math.isclose((9 / 9) * 100, df.iloc[0]['% Absent'])
assert math.isclose((0 / 9) * 100, df.iloc[0]['% Unknown'])
def test_all_samples_summary(loaded_database_connection: DataIndexConnection):
df = query(loaded_database_connection).summary()
assert 1 == len(df)
assert ['Query', 'Present', 'Absent', 'Unknown', 'Total',
'% Present', '% Absent', '% Unknown'] == df.columns.tolist()
assert '' == df.iloc[0]['Query']
assert 9 == df.iloc[0]['Present']
assert 0 == df.iloc[0]['Absent']
assert 0 == df.iloc[0]['Unknown']
assert 9 == df.iloc[0]['Total']
assert math.isclose((9 / 9) * 100, df.iloc[0]['% Present'])
assert math.isclose((0 / 9) * 100, df.iloc[0]['% Absent'])
assert math.isclose((0 / 9) * 100, df.iloc[0]['% Unknown'])
def test_join_custom_dataframe_no_query(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection, universe='dataframe',
data_frame=df, sample_ids_column='Sample ID')
assert 3 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result.sample_set)
assert {'dataframe(ids_col=[Sample ID])'} == set(query_result.toframe()['Query'].tolist())
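# Note: constructing a query with universe='dataframe' restricts the universe to the
# samples listed in the frame (3 here rather than the 9 in the index) and records the
# join in the Query column as 'dataframe(ids_col=[...])' (or 'dataframe(names_col=[...])'
# when joining on sample names, as later tests show).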
def test_query_custom_dataframe_isin_samples(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection, universe='dataframe',
data_frame=df, sample_ids_column='Sample ID')
query_result = query_result.isin(['SampleA', 'SampleC'])
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {sampleA.id, sampleC.id} == set(query_result.sample_set)
assert {"dataframe(ids_col=[Sample ID]) AND isin_samples(['SampleA', 'SampleC'])"} == set(
query_result.toframe()['Query'].tolist())
def test_query_custom_dataframe_isin_kmer_distance(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'blue'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
main_query = query(loaded_database_connection, universe='dataframe',
data_frame=df, sample_ids_column='Sample ID')
# Test isin with sample name
query_result = main_query.isin('SampleA', kind='distance', distance=0.5,
units='kmer_jaccard')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {sampleA.id, sampleC.id} == set(query_result.sample_set)
# Test isin with a samples query result
query_result_A = main_query.isa('SampleA', kind='sample')
query_result = main_query.isin(query_result_A, kind='distance', distance=0.5,
units='kmer_jaccard')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {sampleA.id, sampleC.id} == set(query_result.sample_set)
# Test query with series expression
query_result = query_result.isin(df['Color'] == 'blue', kind='dataframe')
assert 1 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {sampleC.id} == set(query_result.sample_set)
def test_query_custom_dataframe_kmer_to_distances(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'blue'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection, universe='dataframe',
data_frame=df, sample_ids_column='Sample ID')
query_result = query_result.isin('SampleA', kind='distance', distance=0.5,
units='kmer_jaccard')
assert {sampleA.id, sampleC.id} == set(query_result.sample_set)
results_d, labels = query_result.to_distances(kind='kmer')
assert (2, 2) == results_d.shape
assert {'SampleA', 'SampleC'} == set(labels)
label_to_idx = {element: idx for idx, element in enumerate(labels)}
assert math.isclose(results_d[label_to_idx['SampleA']][label_to_idx['SampleA']], 0, rel_tol=1e-3)
assert math.isclose(results_d[label_to_idx['SampleA']][label_to_idx['SampleC']], 0.5, rel_tol=1e-3)
assert math.isclose(results_d[label_to_idx['SampleC']][label_to_idx['SampleA']], 0.5, rel_tol=1e-3)
assert math.isclose(results_d[label_to_idx['SampleC']][label_to_idx['SampleC']], 0, rel_tol=1e-3)
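# Note: to_distances(kind='kmer') returns a square distance matrix plus the matching
# sample labels for the currently selected samples; above, the SampleA/SampleC k-mer
# Jaccard distance is 0.5 with zeros on the diagonal.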
def test_join_custom_dataframe_single_query(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection,
universe='dataframe',
data_frame=df,
sample_ids_column='Sample ID')
assert 3 == len(query_result)
assert 3 == len(query_result.universe_set)
query_result = query_result.hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {sampleB.id, sampleC.id} == set(query_result.sample_set)
df = query_result.toframe()
assert 2 == len(df)
assert {'Query', 'Sample Name', 'Sample ID', 'Status', 'Color'} == set(df.columns.tolist())
df = df.sort_values(['Sample Name'])
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert [sampleB.id, sampleC.id] == df['Sample ID'].tolist()
assert ['green', 'blue'] == df['Color'].tolist()
assert {'dataframe(ids_col=[Sample ID]) AND reference:839:C:G'} == set(df['Query'].tolist())
def test_join_custom_dataframe_query_reset_universe(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection,
universe='dataframe',
data_frame=df,
sample_ids_column='Sample ID')
query_result = query_result.hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {sampleB.id, sampleC.id} == set(query_result.sample_set)
query_result = query_result.reset_universe()
assert 2 == len(query_result)
assert 2 == len(query_result.universe_set)
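# Note: reset_universe() shrinks the universe to the currently selected samples, so the
# universe above drops from the 3 dataframe samples to the 2 samples carrying the
# mutation.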
def test_join_custom_dataframe_single_query_sample_names_with_unknowns(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
['SampleA', 'red'],
['SampleB', 'green'],
['SampleC', 'blue']
], columns=['Samples', 'Color'])
query_result = query(loaded_database_connection,
universe='dataframe',
data_frame=df,
sample_names_column='Samples')
assert 3 == len(query_result)
assert 3 == len(query_result.universe_set)
# No unknowns
query_result_test = query_result.hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result_test)
assert 0 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
assert {sampleB.id, sampleC.id} == set(query_result_test.sample_set)
df = query_result_test.toframe()
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Samples', 'Color'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert [sampleB.id, sampleC.id] == df['Sample ID'].tolist()
assert ['green', 'blue'] == df['Color'].tolist()
assert {'dataframe(names_col=[Samples]) AND reference:839:C:G'} == set(df['Query'].tolist())
# With one unknown and one present
query_result_test = query_result.hasa('reference:5061:G:A', kind='mutation')
assert 1 == len(query_result_test)
assert 1 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
assert {sampleA.id} == set(query_result_test.unknown_set)
assert {sampleB.id} == set(query_result_test.sample_set)
df = query_result_test.toframe(include_unknown=True)
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Samples', 'Color'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert [sampleA.id, sampleB.id] == df['Sample ID'].tolist()
assert ['red', 'green'] == df['Color'].tolist()
assert {'dataframe(names_col=[Samples]) AND reference:5061:G:A'} == set(df['Query'].tolist())
# With all unknown
query_result_test = query_result.hasa('reference:1:1:T', kind='mutation')
assert 0 == len(query_result_test)
assert 3 == len(query_result_test.unknown_set)
assert 3 == len(query_result_test.universe_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result_test.unknown_set)
df = query_result_test.toframe(include_unknown=True)
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Samples', 'Color'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Unknown', 'Unknown', 'Unknown'] == df['Status'].tolist()
assert [sampleA.id, sampleB.id, sampleC.id] == df['Sample ID'].tolist()
assert ['red', 'green', 'blue'] == df['Color'].tolist()
assert {'dataframe(names_col=[Samples]) AND reference:1:1:T'} == set(df['Query'].tolist())
def test_join_custom_dataframe_extra_sample_names(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
['SampleA', 'red'],
['SampleB', 'green'],
['SampleC', 'blue'],
['Extra', 'purple']
], columns=['Samples', 'Color'])
query_result = query(loaded_database_connection,
universe='dataframe',
data_frame=df,
sample_names_column='Samples')
assert 3 == len(query_result)
assert 3 == len(query_result.universe_set)
# No unknowns
query_result_test = query_result.hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result_test)
assert 0 == len(query_result_test.unknown_set)
assert 1 == len(query_result_test.absent_set)
assert 3 == len(query_result_test.universe_set)
df = query_result_test.toframe(include_unknown=True)
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Samples', 'Color'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Present', 'Present'] == df['Status'].tolist()
assert [sampleB.id, sampleC.id] == df['Sample ID'].tolist()
assert ['green', 'blue'] == df['Color'].tolist()
assert {'dataframe(names_col=[Samples]) AND reference:839:C:G'} == set(df['Query'].tolist())
# With unknowns
query_result_test = query_result.hasa('reference:5061:G:A', kind='mutation')
assert 1 == len(query_result_test)
assert 1 == len(query_result_test.unknown_set)
assert 1 == len(query_result_test.absent_set)
assert 3 == len(query_result_test.universe_set)
df = query_result_test.toframe(include_unknown=True)
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Samples', 'Color'] == df.columns.tolist()
df = df.sort_values(['Sample Name'])
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert [sampleA.id, sampleB.id] == df['Sample ID'].tolist()
assert ['red', 'green'] == df['Color'].tolist()
assert {'dataframe(names_col=[Samples]) AND reference:5061:G:A'} == set(df['Query'].tolist())
def test_join_custom_dataframe_missing_sample_names(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
df = pd.DataFrame([
['SampleA', 'red'],
['SampleC', 'blue'],
], columns=['Samples', 'Color'])
query_result = query(loaded_database_connection,
universe='dataframe',
data_frame=df,
sample_names_column='Samples')
assert 2 == len(df)
assert 2 == len(query_result.universe_set)
# No unknowns
query_result_test = query_result.hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result_test.universe_set)
assert 0 == len(query_result_test.unknown_set)
assert 1 == len(query_result_test.absent_set)
assert 1 == len(query_result_test)
df = query_result_test.toframe(include_unknown=True).sort_values(['Sample Name'])
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Samples', 'Color'] == df.columns.tolist()
assert 1 == len(df)
assert ['SampleC'] == df['Sample Name'].tolist()
assert ['Present'] == df['Status'].tolist()
assert [sampleC.id] == df['Sample ID'].tolist()
assert ['blue'] == df['Color'].tolist()
assert {'dataframe(names_col=[Samples]) AND reference:839:C:G'} == set(df['Query'].tolist())
# One unknown; the only present sample is missing from the dataframe, so nothing is returned as present
query_result_test = query_result.hasa('reference:5061:G:A', kind='mutation')
assert 2 == len(query_result_test.universe_set)
assert 1 == len(query_result_test.unknown_set)
assert 1 == len(query_result_test.absent_set)
assert 0 == len(query_result_test)
df = query_result_test.toframe(include_unknown=True).sort_values(['Sample Name'])
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Samples', 'Color'] == df.columns.tolist()
assert 1 == len(df)
assert ['SampleA'] == df['Sample Name'].tolist()
assert ['Unknown'] == df['Status'].tolist()
assert [sampleA.id] == df['Sample ID'].tolist()
assert ['red'] == df['Color'].tolist()
assert {'dataframe(names_col=[Samples]) AND reference:5061:G:A'} == set(df['Query'].tolist())
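# Note: when joining on sample names, dataframe rows with no matching indexed sample (the
# 'Extra' row above) are ignored, and indexed samples missing from the dataframe fall
# outside the universe entirely, even if they carry the queried mutation (hence the empty
# result for reference:5061:G:A when SampleB is absent from the frame).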
def test_query_then_join_dataframe_single_query(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection)
assert 9 == len(query_result)
assert 9 == len(query_result.universe_set)
# Case: no unknowns
query_result_test = query_result.hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result_test)
assert 0 == len(query_result_test.unknown_set)
assert 7 == len(query_result_test.absent_set)
assert 9 == len(query_result_test.universe_set)
assert {sampleB.id, sampleC.id} == set(query_result_test.sample_set)
df = query_result_test.toframe()
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert {'reference:839:C:G'} == set(df['Query'].tolist())
# Now join data frame
query_result_test = query_result_test.join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert 2 == len(query_result_test)
assert 3 == len(query_result_test.universe_set)
df = query_result_test.toframe(include_unknown=True)
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Color'] == df.columns.tolist()
assert {'reference:839:C:G AND dataframe(ids_col=[Sample ID])'} == set(df['Query'].tolist())
df = df.sort_values(['Sample Name'])
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Present', 'Present'] == df['Status'].tolist()
assert [sampleB.id, sampleC.id] == df['Sample ID'].tolist()
assert ['green', 'blue'] == df['Color'].tolist()
# Case: some unknowns
query_result_test = query_result.hasa('reference:5061:G:A', kind='mutation')
assert 1 == len(query_result_test)
assert 1 == len(query_result_test.unknown_set)
assert 7 == len(query_result_test.absent_set)
assert 9 == len(query_result_test.universe_set)
df = query_result_test.toframe(include_unknown=True)
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert {'reference:5061:G:A'} == set(df['Query'].tolist())
# Now join data frame
query_result_test = query_result_test.join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert 1 == len(query_result_test)
assert 1 == len(query_result_test.unknown_set)
assert 1 == len(query_result_test.absent_set)
assert 3 == len(query_result_test.universe_set)
df = query_result_test.toframe(include_unknown=True)
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Color'] == df.columns.tolist()
assert {'reference:5061:G:A AND dataframe(ids_col=[Sample ID])'} == set(df['Query'].tolist())
df = df.sort_values(['Sample Name'])
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert [sampleA.id, sampleB.id] == df['Sample ID'].tolist()
assert ['red', 'green'] == df['Color'].tolist()
def test_query_join_dataframe_isa_dataframe_column(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection).join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert 3 == len(query_result)
assert 3 == len(query_result.universe_set)
# By default, isa should select by 'sample'
sub_result = query_result.isa('SampleB')
assert 1 == len(sub_result)
assert 3 == len(sub_result.universe_set)
assert {sampleB.id} == set(sub_result.sample_set)
df = sub_result.toframe()
assert ['SampleB'] == df['Sample Name'].tolist()
assert {"dataframe(ids_col=[Sample ID]) AND isa_sample('SampleB')"} == set(df['Query'].tolist())
# Make sure isa also works when we pass the kind
sub_result = query_result.isa('SampleB', kind='sample')
assert 1 == len(sub_result)
assert 3 == len(sub_result.universe_set)
assert {sampleB.id} == set(sub_result.sample_set)
df = sub_result.toframe()
assert ['SampleB'] == df['Sample Name'].tolist()
assert {"dataframe(ids_col=[Sample ID]) AND isa_sample('SampleB')"} == set(df['Query'].tolist())
# Explicitly passing kind='dataframe' should select by a column in the dataframe
sub_result = query_result.isa('red', isa_column='Color', kind='dataframe')
assert 1 == len(sub_result)
assert 3 == len(sub_result.universe_set)
assert {sampleA.id} == set(sub_result.sample_set)
df = sub_result.toframe()
assert ['SampleA'] == df['Sample Name'].tolist()
assert {"dataframe(ids_col=[Sample ID]) AND isa('Color' is 'red')"} == set(df['Query'].tolist())
# Passing default_isa_kind/default_isa_column when joining should override the defaults for isa()
query_result = query(loaded_database_connection).join(
data_frame=metadata_df, sample_ids_column='Sample ID', default_isa_kind='dataframe',
default_isa_column='Color')
sub_result = query_result.isa('red')
assert 1 == len(sub_result)
assert 3 == len(sub_result.universe_set)
assert {sampleA.id} == set(sub_result.sample_set)
df = sub_result.toframe()
assert ['SampleA'] == df['Sample Name'].tolist()
assert {"dataframe(ids_col=[Sample ID]) AND isa('Color' is 'red')"} == set(df['Query'].tolist())
# Setting regex=True should let me pass regexes
sub_result = query_result.isa(r'^re', regex=True)
assert 1 == len(sub_result)
assert 3 == len(sub_result.universe_set)
assert {sampleA.id} == set(sub_result.sample_set)
df = sub_result.toframe()
assert ['SampleA'] == df['Sample Name'].tolist()
assert {"dataframe(ids_col=[Sample ID]) AND isa('Color' contains '^re')"} == set(df['Query'].tolist())
# Nothing should match the regex below
sub_result = query_result.isa(r'^ed', regex=True)
assert 0 == len(sub_result)
assert 3 == len(sub_result.universe_set)
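# Note on isa(): the default kind selects by sample name; kind='dataframe' with
# isa_column matches values in a joined metadata column; regex=True switches to a
# 'contains' regex match; and default_isa_kind/default_isa_column supplied at join time
# change what a bare isa(value) means, as asserted above.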
def test_query_with_unknown_join_dataframe_isa_dataframe_column(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red', 'small'],
[sampleB.id, 'red', 'big'],
[sampleC.id, 'blue', 'big']
], columns=['Sample ID', 'Color', 'Size'])
query_result = query(loaded_database_connection).hasa('reference:5061:G:A').join(data_frame=metadata_df,
sample_ids_column='Sample ID')
assert 1 == len(query_result)
assert 1 == len(query_result.unknown_set)
assert 1 == len(query_result.absent_set)
assert 3 == len(query_result.universe_set)
# By default, isa should select by 'sample'
sub_result = query_result.isa('SampleB')
assert 1 == len(sub_result)
assert 0 == len(sub_result.unknown_set)
assert 2 == len(sub_result.absent_set)
assert 3 == len(sub_result.universe_set)
assert {sampleB.id} == set(sub_result.sample_set)
# isa by sample where sample is unknown
sub_result = query_result.isa('SampleA')
assert 0 == len(sub_result)
assert 1 == len(sub_result.unknown_set)
assert 2 == len(sub_result.absent_set)
assert 3 == len(sub_result.universe_set)
assert {sampleA.id} == set(sub_result.unknown_set)
# Explicitly passing kind='dataframe' should select by a column in the dataframe
sub_result = query_result.isa('red', isa_column='Color', kind='dataframe')
assert 1 == len(sub_result)
assert 1 == len(sub_result.unknown_set)
assert 1 == len(sub_result.absent_set)
assert 3 == len(sub_result.universe_set)
assert {sampleA.id} == set(sub_result.unknown_set)
assert {sampleB.id} == set(sub_result.sample_set)
# isa on a column value ('big') that excludes the unknown sample
sub_result = query_result.isa('big', isa_column='Size', kind='dataframe')
assert 1 == len(sub_result)
assert 0 == len(sub_result.unknown_set)
assert 2 == len(sub_result.absent_set)
assert 3 == len(sub_result.universe_set)
assert {sampleB.id} == set(sub_result.sample_set)
def test_query_join_dataframe_isin_dataframe_column(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection).join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert not query_result.has_tree()
assert 3 == len(query_result)
assert 3 == len(query_result.universe_set)
query_result = query_result.isin(metadata_df['Color'] == 'red', kind='dataframe')
assert 1 == len(query_result)
assert 3 == len(query_result.universe_set)
df = query_result.toframe()
assert ['SampleA'] == df['Sample Name'].tolist()
assert {'dataframe(ids_col=[Sample ID]) AND isin(subset from series)'} == set(df['Query'].tolist())
def test_query_join_dataframe_isin_dataframe_column_select_two_samples(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'red'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection).join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert 3 == len(query_result)
assert 3 == len(query_result.universe_set)
query_result = query_result.isin(metadata_df['Color'] == 'red', kind='dataframe')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
df = query_result.toframe()
df = df.sort_values(['Sample Name'])
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert {'dataframe(ids_col=[Sample ID]) AND isin(subset from series)'} == set(df['Query'].tolist())
def test_query_join_dataframe_isin_dataframe_column_invalid_series_index(
loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
invalid_series_select = pd.Series([True, True], dtype=bool)
query_result = query(loaded_database_connection).join(data_frame=metadata_df, sample_ids_column='Sample ID')
with pytest.raises(Exception) as execinfo:
query_result.isin(data=invalid_series_select, kind='dataframe')
assert 'does not have same index as internal data frame' in str(execinfo.value)
def test_query_and_build_mutation_tree(loaded_database_connection: DataIndexConnection):
query_result = query(loaded_database_connection).hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result)
assert 9 == len(query_result.universe_set)
query_result = query_result.build_tree(kind='mutation', scope='genome', include_reference=True)
assert 2 == len(query_result)
assert 9 == len(query_result.universe_set)
assert isinstance(query_result, MutationTreeSamplesQuery)
query_result = cast(MutationTreeSamplesQuery, query_result)
assert query_result.reference_included
assert 'genome' == query_result.reference_name
assert query_result.tree is not None
assert {'SampleB', 'SampleC', 'genome'} == set(query_result.tree.get_leaf_names())
def test_build_mutation_tree_include_reference(loaded_database_connection: DataIndexConnection):
query_result = query(loaded_database_connection)
assert 9 == len(query_result)
assert 9 == len(query_result.universe_set)
query_result = query_result.build_tree(kind='mutation', scope='genome', include_reference=True)
assert 3 == len(query_result)
assert 9 == len(query_result.universe_set)
assert isinstance(query_result, MutationTreeSamplesQuery)
query_result = cast(MutationTreeSamplesQuery, query_result)
assert query_result.reference_included
assert 'genome' == query_result.reference_name
assert query_result.tree is not None
assert {'SampleA', 'SampleB', 'SampleC', 'genome'} == set(query_result.tree.get_leaf_names())
def test_build_mutation_tree_no_include_reference(loaded_database_connection: DataIndexConnection):
query_result = query(loaded_database_connection)
assert 9 == len(query_result)
assert 9 == len(query_result.universe_set)
query_result = query_result.build_tree(kind='mutation', scope='genome', include_reference=False)
assert 3 == len(query_result)
assert 9 == len(query_result.universe_set)
assert isinstance(query_result, MutationTreeSamplesQuery)
query_result = cast(MutationTreeSamplesQuery, query_result)
assert not query_result.reference_included
assert 'genome' == query_result.reference_name
assert query_result.tree is not None
assert {'SampleA', 'SampleB', 'SampleC'} == set(query_result.tree.get_leaf_names())
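# Note: build_tree(kind='mutation', scope='genome', ...) returns a MutationTreeSamplesQuery
# whose tree leaves are the currently selected sample names, plus a 'genome' reference
# leaf when include_reference=True; the sample selection and universe themselves are
# unchanged by tree building, as the tests above assert.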
def test_query_build_tree_and_query(loaded_database_connection: DataIndexConnection):
query_result = query(loaded_database_connection).hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result)
assert 9 == len(query_result.universe_set)
query_result = query_result.build_tree(kind='mutation', scope='genome', include_reference=True)
assert 2 == len(query_result)
assert 9 == len(query_result.universe_set)
query_result = query_result.hasa('reference:5061:G:A', kind='mutation')
assert 1 == len(query_result)
assert 9 == len(query_result.universe_set)
# Tree should still be complete
assert {'SampleB', 'SampleC', 'genome'} == set(query_result.tree.get_leaf_names())
def test_query_build_tree_and_within_kmer(loaded_database_connection: DataIndexConnection):
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
query_result = query(loaded_database_connection).hasa('reference:839:C:G', kind='mutation') \
.build_tree(kind='mutation', scope='genome', include_reference=True)
assert 2 == len(query_result)
assert 9 == len(query_result.universe_set)
assert {sampleB.id, sampleC.id} == set(query_result.sample_set)
assert query_result.has_tree()
query_result = query_result.within('SampleA', distance=0.5,
units='kmer_jaccard')
assert 1 == len(query_result)
assert 9 == len(query_result.universe_set)
assert {sampleC.id} == set(query_result.sample_set)
def test_query_build_tree_dataframe(loaded_database_connection: DataIndexConnection):
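    # Chain hasa() -> build_tree() -> hasa() -> toframe() and check the resulting dataframe contents and Query string.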
db = loaded_database_connection.database
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
df = query(loaded_database_connection).hasa(
'reference:839:C:G', kind='mutation').build_tree(
kind='mutation', scope='genome', include_reference=True).hasa(
'reference:5061:G:A', kind='mutation').toframe()
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB'] == df['Sample Name'].tolist()
assert ['Present'] == df['Status'].tolist()
assert [sampleB.id] == df['Sample ID'].tolist()
assert ['reference:839:C:G AND mutation_tree(genome) AND reference:5061:G:A'] == df['Query'].tolist()
def test_query_then_build_tree_then_join_dataframe(loaded_database_connection: DataIndexConnection):
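    # Join a metadata dataframe onto a tree query, then continue with isin() samples/mrca queries and reset_universe().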
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection).hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result)
assert 9 == len(query_result.universe_set)
query_result = query_result.build_tree(kind='mutation', scope='genome', include_reference=True)
assert 2 == len(query_result)
assert 9 == len(query_result.universe_set)
assert isinstance(query_result, TreeSamplesQuery)
df = query_result.toframe()
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert {'reference:839:C:G AND mutation_tree(genome)'} == set(df['Query'].tolist())
# Now join data frame
query_result_join = query_result.join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert 2 == len(query_result_join)
assert 3 == len(query_result_join.universe_set)
assert isinstance(query_result_join, TreeSamplesQuery)
df = query_result_join.toframe()
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Color'] == df.columns.tolist()
assert {'reference:839:C:G AND mutation_tree(genome) AND dataframe(ids_col=[Sample ID])'} == set(
df['Query'].tolist())
df = df.sort_values(['Sample Name'])
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert [sampleB.id, sampleC.id] == df['Sample ID'].tolist()
assert ['green', 'blue'] == df['Color'].tolist()
    # Within-style queries should still work since a tree is attached
query_result = query_result_join.isin(['SampleB', 'SampleC'], kind='mrca')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {'SampleB', 'SampleC'} == set(query_result.tolist())
# mrca from samples query
query_result_BC = query_result_join.isin(['SampleB', 'SampleC'], kind='samples')
query_result = query_result_join.isin(query_result_BC, kind='mrca')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {'SampleB', 'SampleC'} == set(query_result.tolist())
# mrca from samples set
sample_set_BC = SampleSet([sampleB.id, sampleC.id])
query_result = query_result_join.isin(sample_set_BC, kind='mrca')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
assert {'SampleB', 'SampleC'} == set(query_result.tolist())
# mrca from samples query, empty result
query_result_empty = query_result_join.isin([], kind='samples')
query_result = query_result_join.isin(query_result_empty, kind='mrca')
assert 0 == len(query_result)
assert 3 == len(query_result.universe_set)
# Resetting universe should work properly
query_result_BC = query_result_join.isin(['SampleB', 'SampleC'], kind='samples')
query_result = query_result_join.isin(query_result_BC, kind='mrca')
assert 2 == len(query_result)
assert 3 == len(query_result.universe_set)
query_result = query_result.reset_universe()
assert 2 == len(query_result)
assert 2 == len(query_result.universe_set)
def test_empty_universe_tree_query(prebuilt_tree: Tree, loaded_database_connection: DataIndexConnection):
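    # After isa() with a name that matches nothing, reset_universe() should give an empty universe and NA% in the string representation.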
query_result = query(loaded_database_connection).join_tree(
tree=prebuilt_tree, kind='mutation',
reference_name='genome',
alignment_length=5180)
query_result = query_result.isa('invalid_name')
assert 0 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 9 == len(query_result.absent_set)
assert 9 == len(query_result.universe_set)
query_result = query_result.reset_universe()
assert 0 == len(query_result)
assert 0 == len(query_result.unknown_set)
assert 0 == len(query_result.absent_set)
assert 0 == len(query_result.universe_set)
assert '<MutationTreeSamplesQuery[' in str(query_result)
assert 'selected=NA%' in str(query_result)
assert 'unknown=NA%' in str(query_result)
def test_query_tree_join_dataframe_isa_dataframe_column(loaded_database_connection: DataIndexConnection):
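    # isa() on a joined dataframe column, including regex matching when the column contains an NA value.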
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, pd.NA]
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection).hasa('reference:839:C:G', kind='mutation')
assert 2 == len(query_result)
assert {sampleB.id, sampleC.id} == set(query_result.sample_set)
assert not query_result.has_tree()
query_result = query_result.build_tree(kind='mutation', scope='genome', include_reference=True)
assert 2 == len(query_result)
assert isinstance(query_result, TreeSamplesQuery)
assert query_result.has_tree()
query_result = query_result.join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert 2 == len(query_result)
assert query_result.has_tree()
# Now try to do isa on tree + dataframe query
query_result = query_result.isa('green', kind='dataframe', isa_column='Color')
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
# Do isa with regex and an NA in the dataframe
query_result = query_result.isa(r'^gr', kind='dataframe', isa_column='Color', regex=True)
assert 1 == len(query_result)
assert {sampleB.id} == set(query_result.sample_set)
def test_query_join_tree_join_dataframe(prebuilt_tree: Tree, loaded_database_connection: DataIndexConnection):
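    # Join a prebuilt tree and then a metadata dataframe; isa() on the dataframe column and mrca queries should both work.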
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
metadata_df = pd.DataFrame([
[sampleA.id, 'red'],
[sampleB.id, 'green'],
[sampleC.id, 'blue']
], columns=['Sample ID', 'Color'])
query_result = query(loaded_database_connection).join_tree(tree=prebuilt_tree, kind='mutation',
reference_name='genome',
alignment_length=5180)
assert 3 == len(query_result)
assert 9 == len(query_result.universe_set)
assert {sampleA.id, sampleB.id, sampleC.id} == set(query_result.sample_set)
assert query_result.has_tree()
query_result = query_result.join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert 3 == len(query_result)
assert query_result.has_tree()
# Now try to do isa on tree + dataframe query
query_result_color = query_result.isa('green', kind='dataframe', isa_column='Color')
assert 1 == len(query_result_color)
assert {sampleB.id} == set(query_result_color.sample_set)
    # mrca A and C
df = query_result.isin(['SampleA', 'SampleC'], kind='mrca').toframe().sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status', 'Color'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['red', 'green', 'blue'] == df['Color'].tolist()
def test_query_tree_join_dataframe_hasa_snpeff(loaded_database_connection_annotations: DataIndexConnection):
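    # hasa() with HGVS (SnpEff) identifiers against tree, dataframe, tree+dataframe, and experimental tree queries.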
db = loaded_database_connection_annotations.database
sample_sh14_001 = db.get_session().query(Sample).filter(Sample.name == 'SH14-001').one()
sample_sh14_014 = db.get_session().query(Sample).filter(Sample.name == 'SH14-014').one()
sample_sh10_014 = db.get_session().query(Sample).filter(Sample.name == 'SH10-014').one()
metadata_df = pd.DataFrame([
[sample_sh14_001.id, 'red'],
[sample_sh14_014.id, 'red'],
[sample_sh10_014.id, 'blue']
], columns=['Sample ID', 'Color'])
tree = Tree(str(snpeff_tree_file))
# Query hasa with tree query
query_result = query(loaded_database_connection_annotations).join_tree(tree,
kind='mutation',
alignment_length=4888768,
reference_name='NC_011083')
assert 3 == len(query_result)
assert isinstance(query_result, TreeSamplesQuery)
assert query_result.has_tree()
query_result = query_result.hasa('hgvs:NC_011083:SEHA_RS04550:p.Ile224fs')
assert 2 == len(query_result)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result.sample_set)
# Query hasa with database query
query_result = query(loaded_database_connection_annotations).join(data_frame=metadata_df,
sample_ids_column='Sample ID')
assert 3 == len(query_result)
assert isinstance(query_result, DataFrameSamplesQuery)
assert not query_result.has_tree()
query_result = query_result.hasa('hgvs:NC_011083:SEHA_RS04550:p.Ile224fs')
assert 2 == len(query_result)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result.sample_set)
# Query hasa with tree joined to dataframe
query_result = query(loaded_database_connection_annotations).join_tree(tree,
kind='mutation',
alignment_length=4888768,
reference_name='NC_011083')
query_result = query_result.join(data_frame=metadata_df, sample_ids_column='Sample ID')
assert 3 == len(query_result)
assert isinstance(query_result, TreeSamplesQuery)
assert query_result.has_tree()
query_result = query_result.hasa('hgvs:NC_011083:SEHA_RS04550:p.Ile224fs')
assert 2 == len(query_result)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result.sample_set)
# Query hasa with experimental tree query
query_result = query(loaded_database_connection_annotations).join_tree(tree,
kind='mutation',
alignment_length=4888768,
reference_name='NC_011083')
query_result = cast(MutationTreeSamplesQuery, query_result)
query_result = ExperimentalTreeSamplesQuery.from_tree_query(query_result)
assert 3 == len(query_result)
assert isinstance(query_result, ExperimentalTreeSamplesQuery)
assert query_result.has_tree()
query_result = query_result.hasa('hgvs:NC_011083:SEHA_RS04550:p.Ile224fs')
assert 2 == len(query_result)
assert {sample_sh14_001.id, sample_sh14_014.id} == set(query_result.sample_set)
def test_within_constructed_tree(loaded_database_connection: DataIndexConnection):
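    # Exercise isin()/within() on a constructed tree: substitution distances, mrca, sample selection, isa, and kmer jaccard.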
query_result = query(loaded_database_connection).hasa(
'reference:839:C:G', kind='mutation').build_tree(
kind='mutation', scope='genome', include_reference=True, extra_params='--seed 42 -m GTR')
assert 2 == len(query_result)
assert 9 == len(query_result.universe_set)
# subs/site
df = query_result.isin('SampleC', kind='distance', distance=0.005,
units='substitutions/site').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(0.005 substitutions/site of 'SampleC')"
} == set(df['Query'].tolist())
# subs/site using within
df = query_result.within('SampleC', distance=0.005, units='substitutions/site').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(0.005 substitutions/site of 'SampleC')"
} == set(df['Query'].tolist())
# subs
df = query_result.isin('SampleC', kind='distance', distance=26, units='substitutions').toframe().sort_values(
'Sample Name')
assert 2 == len(df)
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(26 substitutions of 'SampleC')"
} == set(df['Query'].tolist())
# subs using samples query
query_result_C = query_result.isin(['SampleC'], kind='samples')
df = query_result.isin(query_result_C, kind='distance', distance=26, units='substitutions').toframe().sort_values(
'Sample Name')
assert 2 == len(df)
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {'reference:839:C:G AND mutation_tree(genome) AND within(26 substitutions of '
'<MutationTreeSamplesQuery[selected=11% (1/9) samples, unknown=0% (0/9) samples]>)'
} == set(df['Query'].tolist())
# subs using samples query, empty result
query_result_empty = query_result.isin([], kind='samples')
df = query_result.isin(query_result_empty, kind='distance', distance=26,
units='substitutions').toframe().sort_values(
'Sample Name')
assert 0 == len(df)
# should not include reference genome
df = query_result.isin('SampleC', kind='distance', distance=100, units='substitutions').toframe().sort_values(
'Sample Name')
assert 2 == len(df)
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(100 substitutions of 'SampleC')"
} == set(df['Query'].tolist())
# should have only query sample
df = query_result.isin('SampleC', kind='distance', distance=1, units='substitutions').toframe().sort_values(
'Sample Name')
assert 1 == len(df)
assert ['SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(1 substitutions of 'SampleC')"
} == set(df['Query'].tolist())
# should have only query sample, using samples query as input
query_result_C = query_result.isin(['SampleC'], kind='samples')
df = query_result.isin(query_result_C, kind='distance', distance=1, units='substitutions').toframe().sort_values(
'Sample Name')
assert 1 == len(df)
assert ['SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(1 substitutions of "
"<MutationTreeSamplesQuery[selected=11% (1/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
# mrca
df = query_result.isin(['SampleB', 'SampleC'], kind='mrca').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(mrca of ['SampleB', 'SampleC'])"
} == set(df['Query'].tolist())
# mrca of samples query
query_result_BC = query_result.isin(['SampleB', 'SampleC'], kind='samples')
df = query_result.isin(query_result_BC, kind='mrca').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(mrca of "
"<MutationTreeSamplesQuery[selected=22% (2/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
# mrca of samples query, single sample as result
query_result_B = query_result.isin(['SampleB'], kind='samples')
df = query_result.isin(query_result_B, kind='mrca').toframe().sort_values('Sample Name')
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND within(mrca of "
"<MutationTreeSamplesQuery[selected=11% (1/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
    # mrca of samples query, empty results
query_result_empty = query_result.isin([], kind='samples')
df = query_result.isin(query_result_empty, kind='mrca').toframe().sort_values('Sample Name')
assert 0 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
# Samples isin()
df = query_result.isin(['SampleA', 'SampleC']).toframe().sort_values('Sample Name')
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND isin_samples(['SampleA', 'SampleC'])"
} == set(df['Query'].tolist())
    # Samples isin() from samples query
query_result_AC = query_result.isin(['SampleA', 'SampleC'], kind='samples')
df = query_result.isin(query_result_AC).toframe().sort_values('Sample Name')
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleC'] == df['Sample Name'].tolist()
assert {
"reference:839:C:G AND mutation_tree(genome) AND "
"isin_samples(<MutationTreeSamplesQuery[selected=11% (1/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
# Sample isa()
df = query_result.isa('SampleC').toframe().sort_values('Sample Name')
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND isa_sample('SampleC')"
} == set(df['Query'].tolist())
# Sample isa() empty
df = query_result.isa('SampleA').toframe().sort_values('Sample Name')
assert 0 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
# kmer jaccard still works
df = query_result.within('SampleA', distance=0.5, units='kmer_jaccard').toframe().sort_values('Sample Name')
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleC'] == df['Sample Name'].tolist()
assert {"reference:839:C:G AND mutation_tree(genome) AND isin_kmer_jaccard('SampleA', dist=0.5, k=31)"
} == set(df['Query'].tolist())
def test_build_tree_experimental(loaded_database_connection: DataIndexConnection):
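    # build_tree(kind='mutation_experimental') should return an ExperimentalTreeSamplesQuery that still supports isin().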
query_result = query(loaded_database_connection).hasa(
'reference:839:C:G', kind='mutation').build_tree(
kind='mutation_experimental', scope='genome', include_reference=True, extra_params='--seed 42 -m GTR')
assert 2 == len(query_result)
assert isinstance(query_result, ExperimentalTreeSamplesQuery)
# isin should still work with ExperimentalTreeSamplesQuery
df = query_result.isin('SampleC', kind='distance', distance=0.005,
units='substitutions/site').toframe().sort_values('Sample Name')
assert 2 == len(df)
def test_within_constructed_tree_larger_tree(loaded_database_connection: DataIndexConnection):
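    # Build a tree over all samples and test mrca/distance queries, including how hasa() unknowns interact with them.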
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
# Construct new tree with all the samples
query_result = query(loaded_database_connection).build_tree(
kind='mutation', scope='genome', include_reference=True, extra_params='--seed 42 -m GTR')
assert 3 == len(query_result)
# mrca B and C
df = query_result.isin(['SampleB', 'SampleC'], kind='mrca').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"mutation_tree(genome) AND within(mrca of ['SampleB', 'SampleC'])"
} == set(df['Query'].tolist())
# mrca of B and C, samples query
query_result_BC = query_result.isin(['SampleB', 'SampleC'], kind='samples')
df = query_result.isin(query_result_BC, kind='mrca').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"mutation_tree(genome) AND within(mrca of "
"<MutationTreeSamplesQuery[selected=22% (2/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
    # mrca A and C
df = query_result.isin(['SampleA', 'SampleC'], kind='mrca').toframe().sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"mutation_tree(genome) AND within(mrca of ['SampleA', 'SampleC'])"
} == set(df['Query'].tolist())
# mrca of A and B, samples query
query_result_AB = query_result.isin(['SampleA', 'SampleB'], kind='samples')
df = query_result.isin(query_result_AB, kind='mrca').toframe().sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"mutation_tree(genome) AND within(mrca of "
"<MutationTreeSamplesQuery[selected=22% (2/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
# mrca of A and B, samples set
sample_set_AB = SampleSet([sampleA.id, sampleB.id])
df = query_result.isin(sample_set_AB, kind='mrca').toframe().sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"mutation_tree(genome) AND within(mrca of "
"set(2 samples))"
} == set(df['Query'].tolist())
# hasa putting all in unknown and then mrca of A and B, samples query
query_result_AB = query_result.isin(['SampleA', 'SampleB'], kind='samples')
df = query_result.hasa('reference:1:1:T').isin(
query_result_AB, kind='mrca').toframe(include_unknown=True).sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Unknown', 'Unknown', 'Unknown'] == df['Status'].tolist()
assert {"mutation_tree(genome) AND reference:1:1:T AND within(mrca of "
"<MutationTreeSamplesQuery[selected=22% (2/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
# hasa putting all in unknown and then mrca of A and B, by name
df = query_result.hasa('reference:1:1:T').isin(
['SampleA', 'SampleB'], kind='mrca').toframe(include_unknown=True).sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Unknown', 'Unknown', 'Unknown'] == df['Status'].tolist()
assert {"mutation_tree(genome) AND reference:1:1:T AND within(mrca of ['SampleA', 'SampleB'])"
} == set(df['Query'].tolist())
    # mrca of A and B (by name) and then hasa putting all in unknown
df = query_result.isin(
['SampleA', 'SampleB'], kind='mrca').hasa(
'reference:1:1:T').toframe(include_unknown=True).sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Unknown', 'Unknown', 'Unknown'] == df['Status'].tolist()
assert {"mutation_tree(genome) AND within(mrca of ['SampleA', 'SampleB']) AND reference:1:1:T"
} == set(df['Query'].tolist())
    # hasa putting only A in unknown and then mrca of A and B, samples query
query_result_AB = query_result.isin(['SampleA', 'SampleB'], kind='samples')
df = query_result.hasa('reference:5061:G:A').isin(
query_result_AB, kind='mrca').toframe(include_unknown=True).sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert {"mutation_tree(genome) AND reference:5061:G:A AND within(mrca of "
"<MutationTreeSamplesQuery[selected=22% (2/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
    # hasa putting only A in unknown and then mrca of A and B, by name
df = query_result.hasa('reference:5061:G:A').isin(
['SampleA', 'SampleB'], kind='mrca').toframe(include_unknown=True).sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert {"mutation_tree(genome) AND reference:5061:G:A AND within(mrca of ['SampleA', 'SampleB'])"
} == set(df['Query'].tolist())
# mrca of A and B (by name) and hasa putting only A in unknown (and matching B)
df = query_result.isin(
['SampleA', 'SampleB'], kind='mrca').hasa(
'reference:5061:G:A').toframe(include_unknown=True).sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert {"mutation_tree(genome) AND within(mrca of ['SampleA', 'SampleB']) AND reference:5061:G:A"
} == set(df['Query'].tolist())
    # hasa putting only A in unknown and then mrca of A, by name
df = query_result.hasa('reference:5061:G:A').isin(
'SampleA', kind='mrca').toframe(include_unknown=True).sort_values('Sample Name')
assert 1 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA'] == df['Sample Name'].tolist()
assert ['Unknown'] == df['Status'].tolist()
assert {"mutation_tree(genome) AND reference:5061:G:A AND within(mrca of ['SampleA'])"
} == set(df['Query'].tolist())
def test_within_joined_mutations_tree(prebuilt_tree: Tree, loaded_database_connection: DataIndexConnection):
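    # mrca and substitution-distance queries against a prebuilt mutation tree attached via join_tree().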
db = loaded_database_connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
query_result = query(loaded_database_connection).join_tree(tree=prebuilt_tree, kind='mutation',
reference_name='genome',
alignment_length=5180)
assert 3 == len(query_result)
# mrca B and C
df = query_result.isin(['SampleB', 'SampleC'], kind='mrca').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"join_tree(4 leaves) AND within(mrca of ['SampleB', 'SampleC'])"
} == set(df['Query'].tolist())
# mrca of B and C, samples query
query_result_BC = query_result.isin(['SampleB', 'SampleC'], kind='samples')
df = query_result.isin(query_result_BC, kind='mrca').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"join_tree(4 leaves) AND within(mrca of "
"<MutationTreeSamplesQuery[selected=22% (2/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
    # mrca A and C
df = query_result.isin(['SampleA', 'SampleC'], kind='mrca').toframe().sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"join_tree(4 leaves) AND within(mrca of ['SampleA', 'SampleC'])"
} == set(df['Query'].tolist())
# mrca of A and B, samples query
query_result_AB = query_result.isin(['SampleA', 'SampleB'], kind='samples')
df = query_result.isin(query_result_AB, kind='mrca').toframe().sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"join_tree(4 leaves) AND within(mrca of "
"<MutationTreeSamplesQuery[selected=22% (2/9) samples, unknown=0% (0/9) samples]>)"
} == set(df['Query'].tolist())
# mrca of A and B, samples set
sample_set_AB = SampleSet([sampleA.id, sampleB.id])
df = query_result.isin(sample_set_AB, kind='mrca').toframe().sort_values('Sample Name')
assert 3 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {"join_tree(4 leaves) AND within(mrca of "
"set(2 samples))"
} == set(df['Query'].tolist())
# subs/site using samples query
query_result_C = query_result.isin(['SampleC'], kind='samples')
df = query_result.isin(query_result_C, kind='distance', distance=2,
units='substitutions/site').toframe().sort_values(
'Sample Name')
assert 2 == len(df)
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {'join_tree(4 leaves) AND within(2 substitutions/site of '
'<MutationTreeSamplesQuery[selected=11% (1/9) samples, unknown=0% (0/9) samples]>)'
} == set(df['Query'].tolist())
# hasa to put A, B, and C in unknown and then subs/site using samples query to only select B and C
query_result_C = query_result.isin(['SampleC'], kind='samples')
df = query_result.hasa('reference:1:1:T').isin(query_result_C, kind='distance', distance=2,
units='substitutions/site').toframe(
include_unknown=True).sort_values('Sample Name')
assert 2 == len(df)
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Unknown', 'Unknown'] == df['Status'].tolist()
assert {'join_tree(4 leaves) AND reference:1:1:T AND within(2 substitutions/site of '
'<MutationTreeSamplesQuery[selected=11% (1/9) samples, unknown=0% (0/9) samples]>)'
} == set(df['Query'].tolist())
# hasa to put A, B, and C in unknown and then subs/site using samples query to select A, B, and C
query_result_C = query_result.isin(['SampleC'], kind='samples')
df = query_result.hasa('reference:1:1:T').isin(query_result_C, kind='distance', distance=4,
units='substitutions/site').toframe(
include_unknown=True).sort_values('Sample Name')
assert 3 == len(df)
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Unknown', 'Unknown', 'Unknown'] == df['Status'].tolist()
assert {'join_tree(4 leaves) AND reference:1:1:T AND within(4 substitutions/site of '
'<MutationTreeSamplesQuery[selected=11% (1/9) samples, unknown=0% (0/9) samples]>)'
} == set(df['Query'].tolist())
# hasa to put A, B, and C in unknown and then subs/site using sample names to only select B and C
df = query_result.hasa('reference:1:1:T').isin(['SampleC'], kind='distance', distance=2,
units='substitutions/site').toframe(
include_unknown=True).sort_values('Sample Name')
assert 2 == len(df)
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Unknown', 'Unknown'] == df['Status'].tolist()
assert {"join_tree(4 leaves) AND reference:1:1:T AND within(2 substitutions/site of ['SampleC'])"
} == set(df['Query'].tolist())
# hasa to put A, B, and C in unknown and then subs/site using sample names to select A, B, and C
df = query_result.hasa('reference:1:1:T').isin(['SampleA', 'SampleC'], kind='distance', distance=2,
units='substitutions/site').toframe(
include_unknown=True).sort_values('Sample Name')
assert 3 == len(df)
assert ['SampleA', 'SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert ['Unknown', 'Unknown', 'Unknown'] == df['Status'].tolist()
assert {"join_tree(4 leaves) AND reference:1:1:T AND within(2 substitutions/site of ['SampleA', 'SampleC'])"
} == set(df['Query'].tolist())
# hasa to put A in unknown, select B and then subs/site using sample names to select A, B, and C
df = query_result.hasa('reference:5061:G:A').isin(['SampleA', 'SampleC'], kind='distance', distance=2,
units='substitutions/site').toframe(
include_unknown=True).sort_values('Sample Name')
assert 2 == len(df)
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert {"join_tree(4 leaves) AND reference:5061:G:A AND within(2 substitutions/site of ['SampleA', 'SampleC'])"
} == set(df['Query'].tolist())
# subs/site using sample names to select A, B, and C, and then hasa to put A in unknown, select B
df = query_result.isin(['SampleA', 'SampleC'], kind='distance', distance=2, units='substitutions/site').hasa(
'reference:5061:G:A').toframe(include_unknown=True).sort_values('Sample Name')
assert 2 == len(df)
assert ['SampleA', 'SampleB'] == df['Sample Name'].tolist()
assert ['Unknown', 'Present'] == df['Status'].tolist()
assert {"join_tree(4 leaves) AND within(2 substitutions/site of ['SampleA', 'SampleC']) AND reference:5061:G:A"
} == set(df['Query'].tolist())
    # subs using sample names
df = query_result.isin(['SampleC'], kind='distance', distance=2 * 5180,
units='substitutions').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['SampleB', 'SampleC'] == df['Sample Name'].tolist()
assert {f'join_tree(4 leaves) AND within({2 * 5180} substitutions of '
"['SampleC'])"
} == set(df['Query'].tolist())
# kmer jaccard still works
df = query_result.within('SampleA', distance=0.5, units='kmer_jaccard').toframe().sort_values('Sample Name')
assert 2 == len(df)
assert ['Query', 'Sample Name', 'Sample ID', 'Status'] == df.columns.tolist()
assert ['SampleA', 'SampleC'] == df['Sample Name'].tolist()
assert {"join_tree(4 leaves) AND isin_kmer_jaccard('SampleA', dist=0.5, k=31)"
} == set(df['Query'].tolist())
def test_join_kmer_tree(prebuilt_tree: Tree, loaded_database_connection: DataIndexConnection):
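    # join_tree(kind='kmer') should attach a ClusterTree containing the three samples as leaves.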
query_result = query(loaded_database_connection).join_tree(tree=prebuilt_tree, kind='kmer')
assert 3 == len(query_result)
assert 9 == len(query_result.universe_set)
assert isinstance(query_result.tree, ClusterTree)
assert {'SampleA', 'SampleB', 'SampleC'} == set(query_result.tree.get_leaf_names())
def test_summary_features_kindmutations(loaded_database_connection: DataIndexConnection):
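    # features_summary() for mutations should match counts derived from the per-sample snippy dataframes, with and without unknown features.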
dfA = pd.read_csv(snippy_all_dataframes['SampleA'], sep='\t')
dfB = pd.read_csv(snippy_all_dataframes['SampleB'], sep='\t')
dfC = pd.read_csv(snippy_all_dataframes['SampleC'], sep='\t')
expected_df = pd.concat([dfA, dfB, dfC])
expected_df = expected_df.groupby('Mutation').agg({
'Sequence': 'first',
'Position': 'first',
'Deletion': 'first',
'Insertion': 'first',
'Type': 'first',
'Mutation': 'count',
}).rename(columns={'Mutation': 'Count'}).sort_index()
expected_df['Total'] = 9
expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])
mutations_df = query(loaded_database_connection).features_summary(ignore_annotations=True)
mutations_df = mutations_df.sort_index()
assert len(expected_df) == len(mutations_df)
assert list(expected_df.columns) == list(mutations_df.columns)
assert list(expected_df.index) == list(mutations_df.index)
assert list(expected_df['Count']) == list(mutations_df['Count'])
assert list(expected_df['Total']) == list(mutations_df['Total'])
assert list(expected_df['Type']) == list(mutations_df['Type'])
assert math.isclose(100 * (2 / 9), mutations_df.loc['reference:619:G:C', 'Percent'])
assert math.isclose(100 * (1 / 9), mutations_df.loc['reference:461:AAAT:G', 'Percent'])
# Test including unknowns
mutations_df = query(loaded_database_connection).features_summary(ignore_annotations=True,
include_unknown_features=True)
mutations_df['Percent'] = mutations_df['Percent'].astype(int) # Convert to int for easier comparison
mutations_df = mutations_df.sort_index()
assert 112 + 440 == len(mutations_df)
assert list(expected_df.columns) == list(mutations_df.columns)
assert 2 == mutations_df.loc['reference:619:G:C', 'Count']
assert 'SNP' == mutations_df.loc['reference:619:G:C', 'Type']
assert 2 == mutations_df.loc['reference:3063:A:ATGCAGC', 'Count']
assert 1 == mutations_df.loc['reference:1984:GTGATTG:TTGA', 'Count']
assert 1 == mutations_df.loc['reference:866:GCCAGATCC:G', 'Count']
assert 3 == mutations_df.loc['reference:90:T:?', 'Count']
assert 'UNKNOWN_MISSING' == mutations_df.loc['reference:90:T:?', 'Type']
assert 2 == mutations_df.loc['reference:190:A:?', 'Count']
assert 1 == mutations_df.loc['reference:210:C:?', 'Count']
    # Test including only unknowns
mutations_df = query(loaded_database_connection).features_summary(ignore_annotations=True,
include_unknown_features=True,
include_present_features=False)
mutations_df['Percent'] = mutations_df['Percent'].astype(int) # Convert to int for easier comparison
mutations_df = mutations_df.sort_index()
assert 440 == len(mutations_df)
assert list(expected_df.columns) == list(mutations_df.columns)
assert 3 == mutations_df.loc['reference:90:T:?', 'Count']
assert 'UNKNOWN_MISSING' == mutations_df.loc['reference:90:T:?', 'Type']
assert 2 == mutations_df.loc['reference:190:A:?', 'Count']
assert 1 == mutations_df.loc['reference:210:C:?', 'Count']
    assert 'reference:619:G:C' not in mutations_df.index
def test_summary_features_kindmutations_unique(loaded_database_connection: DataIndexConnection):
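    # features_summary(selection='unique') should report only mutations unique to the selected sample subset.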
dfA = pd.read_csv(snippy_all_dataframes['SampleA'], sep='\t')
dfB = pd.read_csv(snippy_all_dataframes['SampleB'], sep='\t')
dfC = pd.read_csv(snippy_all_dataframes['SampleC'], sep='\t')
# Unique to A
expected_df = dfA
expected_df = expected_df.groupby('Mutation').agg({
'Sequence': 'first',
'Position': 'first',
'Deletion': 'first',
'Insertion': 'first',
'Type': 'first',
'Mutation': 'count',
}).rename(columns={'Mutation': 'Count'}).sort_index()
expected_df['Total'] = 1
expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])
q = query(loaded_database_connection)
mutations_df = q.isa('SampleA').features_summary(selection='unique', ignore_annotations=True)
mutations_df = mutations_df.sort_index()
assert len(expected_df) == len(mutations_df)
assert 46 == len(mutations_df) # Check length against independently generated length
assert list(expected_df.index) == list(mutations_df.index)
assert list(expected_df['Count']) == list(mutations_df['Count'])
assert list(expected_df['Total']) == list(mutations_df['Total'])
assert list(expected_df['Type']) == list(mutations_df['Type'])
assert math.isclose(100 * (1 / 1), mutations_df.loc['reference:3656:CATT:C', 'Percent'])
# Unique to B
dfAC = pd.concat([dfA, dfC])
expected_df = dfB[~dfB['Mutation'].isin(list(dfAC['Mutation']))]
expected_df = expected_df.groupby('Mutation').agg({
'Sequence': 'first',
'Position': 'first',
'Deletion': 'first',
'Insertion': 'first',
'Type': 'first',
'Mutation': 'count',
}).rename(columns={'Mutation': 'Count'}).sort_index()
expected_df['Total'] = 1
expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])
mutations_df = q.isa('SampleB').features_summary(selection='unique', ignore_annotations=True)
mutations_df = mutations_df.sort_index()
assert len(expected_df) == len(mutations_df)
assert list(expected_df.index) == list(mutations_df.index)
assert list(expected_df['Count']) == list(mutations_df['Count'])
assert list(expected_df['Type']) == list(mutations_df['Type'])
assert list(expected_df['Total']) == list(mutations_df['Total'])
assert math.isclose(100 * (1 / 1), mutations_df.loc['reference:349:AAGT:A', 'Percent'])
# Unique to C
dfAB = pd.concat([dfA, dfB])
expected_df = dfC[~dfC['Mutation'].isin(list(dfAB['Mutation']))]
expected_df = expected_df.groupby('Mutation').agg({
'Sequence': 'first',
'Position': 'first',
'Deletion': 'first',
'Insertion': 'first',
'Type': 'first',
'Mutation': 'count',
}).rename(columns={'Mutation': 'Count'}).sort_index()
expected_df['Total'] = 1
expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])
mutations_df = q.isa('SampleC').features_summary(selection='unique', ignore_annotations=True)
mutations_df = mutations_df.sort_index()
assert len(expected_df) == len(mutations_df)
assert list(expected_df.index) == list(mutations_df.index)
assert list(expected_df['Count']) == list(mutations_df['Count'])
assert list(expected_df['Total']) == list(mutations_df['Total'])
assert list(expected_df['Type']) == list(mutations_df['Type'])
assert math.isclose(100 * (1 / 1), mutations_df.loc['reference:866:GCCAGATCC:G', 'Percent'])
# Unique to BC
dfBC = pd.concat([dfB, dfC])
expected_df = dfBC[~dfBC['Mutation'].isin(list(dfA['Mutation']))]
expected_df = expected_df.groupby('Mutation').agg({
'Sequence': 'first',
'Position': 'first',
'Deletion': 'first',
'Insertion': 'first',
'Type': 'first',
'Mutation': 'count',
}).rename(columns={'Mutation': 'Count'}).sort_index()
expected_df['Total'] = 2
expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])
mutations_df = q.isin(['SampleB', 'SampleC']).features_summary(selection='unique', ignore_annotations=True)
mutations_df = mutations_df.sort_index()
assert len(expected_df) == len(mutations_df)
assert 66 == len(mutations_df) # Check length against independently generated length
assert list(expected_df.index) == list(mutations_df.index)
assert list(expected_df['Count']) == list(mutations_df['Count'])
assert list(expected_df['Total']) == list(mutations_df['Total'])
assert list(expected_df['Type']) == list(mutations_df['Type'])
assert math.isclose(100 * (2 / 2), mutations_df.loc['reference:619:G:C', 'Percent'])
assert math.isclose(100 * (1 / 2), mutations_df.loc['reference:866:GCCAGATCC:G', 'Percent'])
assert math.isclose(100 * (1 / 2), mutations_df.loc['reference:349:AAGT:A', 'Percent'])
# Unique to ABC (all with mutations)
dfABC = pd.concat([dfA, dfB, dfC])
expected_df = dfABC
expected_df = expected_df.groupby('Mutation').agg({
'Sequence': 'first',
'Position': 'first',
'Deletion': 'first',
'Insertion': 'first',
'Type': 'first',
'Mutation': 'count',
}).rename(columns={'Mutation': 'Count'}).sort_index()
expected_df['Total'] = 3
expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])
mutations_df = q.isin(['SampleA', 'SampleB', 'SampleC']).features_summary(selection='unique',
ignore_annotations=True)
mutations_df = mutations_df.sort_index()
assert len(expected_df) == len(mutations_df)
assert 112 == len(mutations_df) # Check length against independently generated length
assert list(expected_df.index) == list(mutations_df.index)
assert list(expected_df['Count']) == list(mutations_df['Count'])
assert list(expected_df['Type']) == list(mutations_df['Type'])
assert list(expected_df['Total']) == list(mutations_df['Total'])
assert math.isclose(100 * (2 / 3), mutations_df.loc['reference:619:G:C', 'Percent'])
assert math.isclose(100 * (1 / 3), mutations_df.loc['reference:866:GCCAGATCC:G', 'Percent'])
assert math.isclose(100 * (1 / 3), mutations_df.loc['reference:349:AAGT:A', 'Percent'])
assert math.isclose(100 * (1 / 3), mutations_df.loc['reference:3656:CATT:C', 'Percent'])
# Unique to None
mutations_df = q.isin([]).features_summary(selection='unique', ignore_annotations=True)
mutations_df = mutations_df.sort_index()
assert 0 == len(mutations_df)
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type',
'Count', 'Total', 'Percent'] == list(mutations_df.columns)
def test_summary_features_kindmutations_annotations(loaded_database_connection_annotations: DataIndexConnection):
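    # features_summary() with annotations enabled should include the SnpEff columns (gene names, impacts, HGVS identifiers).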
q = query(loaded_database_connection_annotations)
# 1 sample
mutations_df = q.isa('SH10-014').features_summary(ignore_annotations=False)
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type',
'Count', 'Total', 'Percent', 'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c', 'ID_HGVS_GN.p'] == list(mutations_df.columns)
assert 139 == len(mutations_df)
## Convert percent to int to make it easier to compare in assert statements
mutations_df['Percent'] = mutations_df['Percent'].astype(int)
## missense variant
assert ['NC_011083', 140658, 'C', 'A', 'SNP', 1, 1, 100,
'missense_variant', 'MODERATE', 'murF', 'SEHA_RS01180', 'transcript', 'protein_coding',
'c.497C>A', 'p.Ala166Glu',
'hgvs:NC_011083:SEHA_RS01180:c.497C>A', 'hgvs:NC_011083:SEHA_RS01180:p.Ala166Glu',
'hgvs_gn:NC_011083:murF:c.497C>A', 'hgvs_gn:NC_011083:murF:p.Ala166Glu'] == list(
mutations_df.loc['NC_011083:140658:C:A'])
## inframe deletion
assert ['NC_011083', 4465400, 'GGCCGAA', 'G', 'INDEL', 1, 1, 100,
'conservative_inframe_deletion', 'MODERATE', 'tyrB', 'SEHA_RS22180', 'transcript', 'protein_coding',
'c.157_162delGAAGCC', 'p.Glu53_Ala54del',
'hgvs:NC_011083:SEHA_RS22180:c.157_162delGAAGCC', 'hgvs:NC_011083:SEHA_RS22180:p.Glu53_Ala54del',
'hgvs_gn:NC_011083:tyrB:c.157_162delGAAGCC', 'hgvs_gn:NC_011083:tyrB:p.Glu53_Ala54del'] == list(
mutations_df.loc['NC_011083:4465400:GGCCGAA:G'])
## Intergenic variant (with some NA values in fields)
assert ['NC_011083', 4555461, 'T', 'TC', 'INDEL', 1, 1, 100,
'intergenic_region', 'MODIFIER', 'SEHA_RS22510-SEHA_RS26685', 'SEHA_RS22510-SEHA_RS26685',
'intergenic_region', 'NA',
'n.4555461_4555462insC', 'NA',
'hgvs:NC_011083:n.4555461_4555462insC', 'NA',
'hgvs_gn:NC_011083:n.4555461_4555462insC', 'NA'] == list(
mutations_df.loc['NC_011083:4555461:T:TC'].fillna('NA'))
# 3 samples
mutations_df = q.isin(['SH10-014', 'SH14-001', 'SH14-014']).features_summary(ignore_annotations=False)
## Convert percent to int to make it easier to compare in assert statements
mutations_df['Percent'] = mutations_df['Percent'].astype(int)
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type',
'Count', 'Total', 'Percent', 'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c', 'ID_HGVS_GN.p'] == list(mutations_df.columns)
assert 177 == len(mutations_df)
## missense variant (3/3)
assert ['NC_011083', 140658, 'C', 'A', 'SNP', 3, 3, 100,
'missense_variant', 'MODERATE', 'murF', 'SEHA_RS01180', 'transcript', 'protein_coding',
'c.497C>A', 'p.Ala166Glu',
'hgvs:NC_011083:SEHA_RS01180:c.497C>A', 'hgvs:NC_011083:SEHA_RS01180:p.Ala166Glu',
'hgvs_gn:NC_011083:murF:c.497C>A', 'hgvs_gn:NC_011083:murF:p.Ala166Glu'] == list(
mutations_df.loc['NC_011083:140658:C:A'])
## Intergenic variant (1/3)
assert ['NC_011083', 4555461, 'T', 'TC', 'INDEL', 1, 3, 33,
'intergenic_region', 'MODIFIER', 'SEHA_RS22510-SEHA_RS26685', 'SEHA_RS22510-SEHA_RS26685',
'intergenic_region', 'NA',
'n.4555461_4555462insC', 'NA',
'hgvs:NC_011083:n.4555461_4555462insC', 'NA',
'hgvs_gn:NC_011083:n.4555461_4555462insC', 'NA'] == list(
mutations_df.loc['NC_011083:4555461:T:TC'].fillna('NA'))
# Test ignore annotations
mutations_df = q.isin(['SH10-014', 'SH14-001', 'SH14-014']).features_summary(ignore_annotations=True)
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type',
'Count', 'Total', 'Percent'] == list(mutations_df.columns)
assert 177 == len(mutations_df)
## Test unique
mutations_df = q.isa('SH10-014').features_summary(selection='unique', ignore_annotations=False)
## Convert percent to int to make it easier to compare in assert statements
mutations_df['Percent'] = mutations_df['Percent'].astype(int)
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type',
'Count', 'Total', 'Percent', 'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c', 'ID_HGVS_GN.p'] == list(mutations_df.columns)
assert 60 == len(mutations_df)
## missense variant
assert ['NC_011083', 2049576, 'A', 'C', 'SNP', 1, 1, 100,
'missense_variant', 'MODERATE', 'cutC', 'SEHA_RS10675', 'transcript', 'protein_coding',
'c.536T>G', 'p.Val179Gly',
'hgvs:NC_011083:SEHA_RS10675:c.536T>G', 'hgvs:NC_011083:SEHA_RS10675:p.Val179Gly',
'hgvs_gn:NC_011083:cutC:c.536T>G', 'hgvs_gn:NC_011083:cutC:p.Val179Gly'] == list(
mutations_df.loc['NC_011083:2049576:A:C'])
def test_summary_features_two(loaded_database_connection: DataIndexConnection):
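    # features_summary() restricted to the two samples matching a hasa() query.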
dfB = pd.read_csv(snippy_all_dataframes['SampleB'], sep='\t')
dfC = pd.read_csv(snippy_all_dataframes['SampleC'], sep='\t')
expected_df = pd.concat([dfB, dfC])
expected_df = expected_df.groupby('Mutation').agg({
'Sequence': 'first',
'Position': 'first',
'Deletion': 'first',
'Insertion': 'first',
'Type': 'first',
'Mutation': 'count',
}).rename(columns={'Mutation': 'Count'}).sort_index()
expected_df['Total'] = 2
expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])
mutations_df = query(loaded_database_connection).hasa(
'reference:839:C:G', kind='mutation').features_summary()
mutations_df = mutations_df.sort_index()
assert len(expected_df) == len(mutations_df)
assert list(expected_df.index) == list(mutations_df.index)
assert list(expected_df['Count']) == list(mutations_df['Count'])
assert list(expected_df['Type']) == list(mutations_df['Type'])
assert list(expected_df['Total']) == list(mutations_df['Total'])
assert math.isclose(100 * (2 / 2), mutations_df.loc['reference:839:C:G', 'Percent'])
def test_summary_features_kindmlst(loaded_database_connection: DataIndexConnection):
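    # features_summary(kind='mlst') for a single sample, across multiple schemes, and with unknown alleles only.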
# Test case of summary of single sample
summary_df = query(loaded_database_connection).isa('SampleA').features_summary(kind='mlst')
summary_df['Percent'] = summary_df['Percent'].astype(int) # Convert to int for easier comparison
assert 7 == len(summary_df)
assert {'lmonocytogenes'} == set(summary_df['Scheme'].tolist())
assert 'MLST Feature' == summary_df.index.name
assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
assert ['lmonocytogenes', 'abcZ', '1', 1, 1, 100] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
assert ['lmonocytogenes', 'bglA', '51', 1, 1, 100] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
# Test samples across multiple schemes
summary_df = query(loaded_database_connection).isin(['SampleA', 'SampleB', 'CFSAN002349',
'2014D-0067']).features_summary(kind='mlst')
summary_df['Percent'] = summary_df['Percent'].astype(int) # Convert to int for easier comparison
assert 15 == len(summary_df)
assert {'lmonocytogenes', 'campylobacter'} == set(summary_df['Scheme'].tolist())
assert 'MLST Feature' == summary_df.index.name
assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
assert ['lmonocytogenes', 'abcZ', '1', 3, 4, 75] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
assert ['lmonocytogenes', 'bglA', '51', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
assert ['lmonocytogenes', 'bglA', '52', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()
assert ['lmonocytogenes', 'ldh', '5', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:ldh:5'].tolist()
assert ['lmonocytogenes', 'lhkA', '5', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:lhkA:5'].tolist()
assert ['lmonocytogenes', 'lhkA', '4', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
assert ['campylobacter', 'aspA', '2', 1, 4, 25] == summary_df.loc['mlst:campylobacter:aspA:2'].tolist()
assert ['campylobacter', 'glyA', '3', 1, 4, 25] == summary_df.loc['mlst:campylobacter:glyA:3'].tolist()
assert 6 == len(summary_df[summary_df['Scheme'] == 'campylobacter']) # Missing one feature since it's unknown
# Test only unknown
summary_df = query(loaded_database_connection).isin(
['SampleA', 'SampleB', 'CFSAN002349', '2014D-0067']).features_summary(
kind='mlst', include_present_features=False, include_unknown_features=True)
summary_df['Percent'] = summary_df['Percent'].astype(int) # Convert to int for easier comparison
assert 2 == len(summary_df)
assert {'lmonocytogenes', 'campylobacter'} == set(summary_df['Scheme'].tolist())
assert 'MLST Feature' == summary_df.index.name
assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
assert ['lmonocytogenes', 'ldh', '?', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:ldh:?'].tolist()
assert ['campylobacter', 'uncA', '?', 1, 4, 25] == summary_df.loc['mlst:campylobacter:uncA:?'].tolist()
# Test only unknown, restrict scheme
summary_df = query(loaded_database_connection).isin(
['SampleA', 'SampleB', 'CFSAN002349', '2014D-0067']).features_summary(
kind='mlst', scheme='lmonocytogenes', include_present_features=False, include_unknown_features=True)
summary_df['Percent'] = summary_df['Percent'].astype(int) # Convert to int for easier comparison
assert 1 == len(summary_df)
assert {'lmonocytogenes'} == set(summary_df['Scheme'].tolist())
assert 'MLST Feature' == summary_df.index.name
assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
assert ['lmonocytogenes', 'ldh', '?', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:ldh:?'].tolist()
def test_features_comparison_kindmutations_annotations(loaded_database_connection_annotations: DataIndexConnection):
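    # features_comparison() between two sample categories using count units, default (percent) units, and percents on an isin() subset.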
q = query(loaded_database_connection_annotations)
category_10 = q.isin('SH10-014')
category_14 = q.isin(['SH14-001', 'SH14-014'])
# Test 2 categories counts
comparison_df = q.features_comparison(sample_categories=[category_10, category_14],
category_prefixes=['10', '14'],
unit='count')
comparison_df = comparison_df.sort_index()
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'10_count', '14_count',
'10_total', '14_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {1} == set(comparison_df['10_total'].tolist())
assert {2} == set(comparison_df['14_total'].tolist())
assert 1 == comparison_df.loc['NC_011083:140658:C:A', '10_count']
assert 'SNP' == comparison_df.loc['NC_011083:140658:C:A', 'Type']
assert 2 == comparison_df.loc['NC_011083:140658:C:A', '14_count']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 1 == comparison_df.loc['NC_011083:4555461:T:TC', '10_count']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', '14_count']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 0 == comparison_df.loc['NC_011083:630556:G:A', '10_count']
assert 2 == comparison_df.loc['NC_011083:630556:G:A', '14_count']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
# Test 2 categories defaults
comparison_df = q.features_comparison(sample_categories=[category_10, category_14])
comparison_df = comparison_df.sort_index()
comparison_df['Category1_percent'] = comparison_df['Category1_percent'].astype(
int) # Convert to int for easier comparison
comparison_df['Category2_percent'] = comparison_df['Category2_percent'].astype(
int) # Convert to int for easier comparison
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'Category1_percent', 'Category2_percent',
'Category1_total', 'Category2_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {1} == set(comparison_df['Category1_total'].tolist())
assert {2} == set(comparison_df['Category2_total'].tolist())
assert 100 == comparison_df.loc['NC_011083:140658:C:A', 'Category1_percent']
assert 100 == comparison_df.loc['NC_011083:140658:C:A', 'Category2_percent']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 100 == comparison_df.loc['NC_011083:4555461:T:TC', 'Category1_percent']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', 'Category2_percent']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 0 == comparison_df.loc['NC_011083:630556:G:A', 'Category1_percent']
assert 100 == comparison_df.loc['NC_011083:630556:G:A', 'Category2_percent']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
# Test 2 categories percents isin subset
comparison_df = q.isin(['SH14-001', 'SH14-014']).features_comparison(
sample_categories=[category_10, category_14],
category_prefixes=['10', '14'],
unit='percent'
)
comparison_df = comparison_df.sort_index()
comparison_df['14_percent'] = comparison_df['14_percent'].astype(int) # Convert to int for easier comparison
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'10_percent', '14_percent',
'10_total', '14_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 117 == len(comparison_df)
assert {2} == set(comparison_df['Total'].tolist())
assert {0} == set(comparison_df['10_total'].tolist())
assert {2} == set(comparison_df['14_total'].tolist())
assert pd.isna(comparison_df.loc['NC_011083:140658:C:A', '10_percent'])
assert 100 == comparison_df.loc['NC_011083:140658:C:A', '14_percent']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert pd.isna(comparison_df.loc['NC_011083:4482211:C:A', '10_percent'])
assert 50 == comparison_df.loc['NC_011083:4482211:C:A', '14_percent']
assert 'hgvs_gn:NC_011083:siiE:p.Arg1263Ser' == comparison_df.loc[
'NC_011083:4482211:C:A', 'ID_HGVS_GN.p']
assert pd.isna(comparison_df.loc['NC_011083:630556:G:A', '10_percent'])
assert 100 == comparison_df.loc['NC_011083:630556:G:A', '14_percent']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
def test_features_comparison_kindmlst(loaded_database_connection: DataIndexConnection):
q = query(loaded_database_connection)
category_lmonocytogenes = q.isin(['SampleA', 'SampleB', 'SampleC',
'CFSAN002349', 'CFSAN023463'])
category_other = category_lmonocytogenes.complement()
# Test two categories percent: one of lmonocytogenes and one of the rest
comparison_df = q.features_comparison(sample_categories=[category_lmonocytogenes, category_other],
category_prefixes=['lmonocytogenes', 'other'],
unit='percent',
kind='mlst')
assert 24 == len(comparison_df)
assert 'MLST Feature' == comparison_df.index.name
assert ['Scheme', 'Locus', 'Allele', 'Total',
'lmonocytogenes_percent', 'other_percent',
'lmonocytogenes_total', 'other_total'] == list(comparison_df.columns)
comparison_df['lmonocytogenes_percent'] = comparison_df['lmonocytogenes_percent'].astype(
int) # Convert to int for easier comparison
comparison_df['other_percent'] = comparison_df['other_percent'].astype(int) # Convert to int for easier comparison
assert {9} == set(comparison_df['Total'].tolist())
assert {5} == set(comparison_df['lmonocytogenes_total'].tolist())
assert {4} == set(comparison_df['other_total'].tolist())
assert 100 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'other_percent']
assert 60 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'other_percent']
assert 40 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'other_percent']
assert 0 == comparison_df.loc['mlst:ecoli:adk:100', 'lmonocytogenes_percent']
assert 50 == comparison_df.loc['mlst:ecoli:adk:100', 'other_percent']
assert 0 == comparison_df.loc['mlst:ecoli:recA:7', 'lmonocytogenes_percent']
assert 50 == comparison_df.loc['mlst:ecoli:recA:7', 'other_percent']
assert 0 == comparison_df.loc['mlst:campylobacter:uncA:6', 'lmonocytogenes_percent']
assert 25 == comparison_df.loc['mlst:campylobacter:uncA:6', 'other_percent']
# Test two categories sample_set: one of lmonocytogenes and one of the rest
comparison_df = q.features_comparison(sample_categories=[category_lmonocytogenes.sample_set,
category_other.sample_set],
category_prefixes=['lmonocytogenes', 'other'],
unit='percent',
kind='mlst')
assert 24 == len(comparison_df)
assert 'MLST Feature' == comparison_df.index.name
assert ['Scheme', 'Locus', 'Allele', 'Total',
'lmonocytogenes_percent', 'other_percent',
'lmonocytogenes_total', 'other_total'] == list(comparison_df.columns)
comparison_df['lmonocytogenes_percent'] = comparison_df['lmonocytogenes_percent'].astype(
int) # Convert to int for easier comparison
comparison_df['other_percent'] = comparison_df['other_percent'].astype(int) # Convert to int for easier comparison
assert {9} == set(comparison_df['Total'].tolist())
assert {5} == set(comparison_df['lmonocytogenes_total'].tolist())
assert {4} == set(comparison_df['other_total'].tolist())
assert 100 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'other_percent']
assert 60 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'other_percent']
assert 40 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'other_percent']
assert 0 == comparison_df.loc['mlst:ecoli:adk:100', 'lmonocytogenes_percent']
assert 50 == comparison_df.loc['mlst:ecoli:adk:100', 'other_percent']
assert 0 == comparison_df.loc['mlst:ecoli:recA:7', 'lmonocytogenes_percent']
assert 50 == comparison_df.loc['mlst:ecoli:recA:7', 'other_percent']
assert 0 == comparison_df.loc['mlst:campylobacter:uncA:6', 'lmonocytogenes_percent']
assert 25 == comparison_df.loc['mlst:campylobacter:uncA:6', 'other_percent']
# Test two categories subset percent: one of lmonocytogenes and one of the rest
q_subset = q.isin(['SampleA', 'SampleB', 'SampleC', '2014C-3598', '2014C-3599'])
comparison_df = q_subset.features_comparison(sample_categories=[category_lmonocytogenes, category_other],
category_prefixes=['lmonocytogenes', 'other'],
unit='percent',
kind='mlst')
assert 16 == len(comparison_df)
assert 'MLST Feature' == comparison_df.index.name
assert ['Scheme', 'Locus', 'Allele', 'Total',
'lmonocytogenes_percent', 'other_percent',
'lmonocytogenes_total', 'other_total'] == list(comparison_df.columns)
comparison_df['lmonocytogenes_percent'] = comparison_df['lmonocytogenes_percent'].astype(
int) # Convert to int for easier comparison
comparison_df['other_percent'] = comparison_df['other_percent'].astype(int) # Convert to int for easier comparison
assert {5} == set(comparison_df['Total'].tolist())
assert {3} == set(comparison_df['lmonocytogenes_total'].tolist())
assert {2} == set(comparison_df['other_total'].tolist())
assert 100 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'other_percent']
assert 33 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'other_percent']
assert 66 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'lmonocytogenes_percent']
assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'other_percent']
assert 0 == comparison_df.loc['mlst:ecoli:adk:100', 'lmonocytogenes_percent']
assert 100 == comparison_df.loc['mlst:ecoli:adk:100', 'other_percent']
assert 0 == comparison_df.loc['mlst:ecoli:recA:7', 'lmonocytogenes_percent']
assert 100 == comparison_df.loc['mlst:ecoli:recA:7', 'other_percent']
def test_features_comparison_kindmutations_with_dataframe(loaded_database_connection_annotations: DataIndexConnection):
db = loaded_database_connection_annotations.database
sample_sh14_001 = db.get_session().query(Sample).filter(Sample.name == 'SH14-001').one()
sample_sh14_014 = db.get_session().query(Sample).filter(Sample.name == 'SH14-014').one()
sample_sh10_014 = db.get_session().query(Sample).filter(Sample.name == 'SH10-014').one()
df = pd.DataFrame([
[sample_sh14_001.id, 'red'],
[sample_sh14_014.id, 'red'],
[sample_sh10_014.id, 'blue']
], columns=['Sample ID', 'Color'])
q = query(loaded_database_connection_annotations, universe='dataframe',
data_frame=df, sample_ids_column='Sample ID')
category_10 = q.isin('SH10-014')
category_14 = q.isin(['SH14-001', 'SH14-014'])
# Test 2 categories counts on dataframe query: dataframe column groupby
comparison_df = q.features_comparison(sample_categories='Color',
categories_kind='dataframe',
unit='count')
comparison_df = comparison_df.sort_index()
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'blue_count', 'red_count',
'blue_total', 'red_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {2} == set(comparison_df['red_total'].tolist())
assert {1} == set(comparison_df['blue_total'].tolist())
assert 1 == comparison_df.loc['NC_011083:140658:C:A', 'blue_count']
assert 2 == comparison_df.loc['NC_011083:140658:C:A', 'red_count']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 'SNP' == comparison_df.loc['NC_011083:140658:C:A', 'Type']
assert 1 == comparison_df.loc['NC_011083:4555461:T:TC', 'blue_count']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', 'red_count']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 'INDEL' == comparison_df.loc['NC_011083:4555461:T:TC', 'Type']
assert 0 == comparison_df.loc['NC_011083:630556:G:A', 'blue_count']
assert 2 == comparison_df.loc['NC_011083:630556:G:A', 'red_count']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
assert 'SNP' == comparison_df.loc['NC_011083:630556:G:A', 'Type']
# Test 2 categories counts on dataframe query: sample_query
comparison_df = q.features_comparison(sample_categories=[category_10, category_14],
category_prefixes=['10', '14'],
unit='count')
comparison_df = comparison_df.sort_index()
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'10_count', '14_count',
'10_total', '14_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {1} == set(comparison_df['10_total'].tolist())
assert {2} == set(comparison_df['14_total'].tolist())
assert 1 == comparison_df.loc['NC_011083:140658:C:A', '10_count']
assert 2 == comparison_df.loc['NC_011083:140658:C:A', '14_count']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 1 == comparison_df.loc['NC_011083:4555461:T:TC', '10_count']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', '14_count']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 0 == comparison_df.loc['NC_011083:630556:G:A', '10_count']
assert 2 == comparison_df.loc['NC_011083:630556:G:A', '14_count']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
# Test 2 categories counts on dataframe query: dataframe column groupby, lower threshold
comparison_df = q.features_comparison(sample_categories='Color',
categories_kind='dataframe',
category_samples_threshold=1,
unit='count')
comparison_df = comparison_df.sort_index()
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'blue_count', 'red_count',
'blue_total', 'red_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {2} == set(comparison_df['red_total'].tolist())
assert {1} == set(comparison_df['blue_total'].tolist())
assert 1 == comparison_df.loc['NC_011083:140658:C:A', 'blue_count']
assert 2 == comparison_df.loc['NC_011083:140658:C:A', 'red_count']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 1 == comparison_df.loc['NC_011083:4555461:T:TC', 'blue_count']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', 'red_count']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 0 == comparison_df.loc['NC_011083:630556:G:A', 'blue_count']
assert 2 == comparison_df.loc['NC_011083:630556:G:A', 'red_count']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
# Test 2 categories counts on dataframe query: dataframe column groupby, higher threshold
comparison_df = q.features_comparison(sample_categories='Color',
categories_kind='dataframe',
category_samples_threshold=2,
unit='count')
comparison_df = comparison_df.sort_index()
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'red_count',
'red_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {2} == set(comparison_df['red_total'].tolist())
assert 2 == comparison_df.loc['NC_011083:140658:C:A', 'red_count']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', 'red_count']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 2 == comparison_df.loc['NC_011083:630556:G:A', 'red_count']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
# Test 2 categories counts on dataframe query: sample_query with threshold
comparison_df = q.features_comparison(sample_categories=[category_10, category_14],
category_prefixes=['10', '14'],
category_samples_threshold=2,
unit='count')
comparison_df = comparison_df.sort_index()
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'14_count',
'14_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {2} == set(comparison_df['14_total'].tolist())
assert 2 == comparison_df.loc['NC_011083:140658:C:A', '14_count']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', '14_count']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 2 == comparison_df.loc['NC_011083:630556:G:A', '14_count']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
# Test 3 categories counts on dataframe query: dataframe column groupby
df = pd.DataFrame([
[sample_sh14_001.id, 'green'],
[sample_sh14_014.id, 'red'],
[sample_sh10_014.id, 'blue']
], columns=['Sample ID', 'Color'])
q = query(loaded_database_connection_annotations, universe='dataframe',
data_frame=df, sample_ids_column='Sample ID')
comparison_df = q.features_comparison(sample_categories='Color',
categories_kind='dataframe',
unit='count')
comparison_df = comparison_df.sort_index()
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'blue_count', 'green_count', 'red_count',
'blue_total', 'green_total', 'red_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {1} == set(comparison_df['red_total'].tolist())
assert {1} == set(comparison_df['green_total'].tolist())
assert {1} == set(comparison_df['blue_total'].tolist())
assert 1 == comparison_df.loc['NC_011083:140658:C:A', 'blue_count']
assert 1 == comparison_df.loc['NC_011083:140658:C:A', 'red_count']
assert 1 == comparison_df.loc['NC_011083:140658:C:A', 'green_count']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 1 == comparison_df.loc['NC_011083:4555461:T:TC', 'blue_count']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', 'red_count']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', 'green_count']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 0 == comparison_df.loc['NC_011083:630556:G:A', 'blue_count']
assert 1 == comparison_df.loc['NC_011083:630556:G:A', 'red_count']
assert 1 == comparison_df.loc['NC_011083:630556:G:A', 'green_count']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
# Test 3 categories counts on dataframe query: one category has NA
df = pd.DataFrame([
[sample_sh14_001.id, 'green'],
[sample_sh14_014.id, 'red'],
[sample_sh10_014.id, pd.NA]
], columns=['Sample ID', 'Color'])
q = query(loaded_database_connection_annotations, universe='dataframe',
data_frame=df, sample_ids_column='Sample ID')
comparison_df = q.features_comparison(sample_categories='Color',
categories_kind='dataframe',
unit='count')
comparison_df = comparison_df.sort_index()
assert comparison_df.index.name == 'Mutation'
assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Total',
'green_count', 'red_count',
'green_total', 'red_total',
'Annotation', 'Annotation_Impact',
'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c',
'ID_HGVS_GN.p'] == list(comparison_df.columns)
assert 177 == len(comparison_df)
assert {3} == set(comparison_df['Total'].tolist())
assert {1} == set(comparison_df['red_total'].tolist())
assert {1} == set(comparison_df['green_total'].tolist())
assert 1 == comparison_df.loc['NC_011083:140658:C:A', 'red_count']
assert 1 == comparison_df.loc['NC_011083:140658:C:A', 'green_count']
assert 'hgvs_gn:NC_011083:murF:p.Ala166Glu' == comparison_df.loc[
'NC_011083:140658:C:A', 'ID_HGVS_GN.p']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', 'red_count']
assert 0 == comparison_df.loc['NC_011083:4555461:T:TC', 'green_count']
assert 'hgvs_gn:NC_011083:n.4555461_4555462insC' == comparison_df.loc[
'NC_011083:4555461:T:TC', 'ID_HGVS_GN.c']
assert 1 == comparison_df.loc['NC_011083:630556:G:A', 'red_count']
assert 1 == comparison_df.loc['NC_011083:630556:G:A', 'green_count']
assert 'hgvs_gn:NC_011083:SEHA_RS03545:p.Trp295*' == comparison_df.loc[
'NC_011083:630556:G:A', 'ID_HGVS_GN.p']
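# Recap of the features_comparison call shapes exercised above (a sketch using only
# names from these tests; the 'mutations' default for kind= is inferred, not stated):
#   q.features_comparison(sample_categories='Color', categories_kind='dataframe',
#                         category_samples_threshold=2, unit='count')
#   q.features_comparison(sample_categories=[category_10, category_14],
#                         category_prefixes=['10', '14'], unit='percent')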
def test_tofeaturesset_all(loaded_database_only_snippy: DataIndexConnection):
    dfA = pd.read_csv(snippy_all_dataframes['SampleA'], sep='\t')
#!/usr/bin/env python3.7
# Copyright [2020] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import fnmatch #module for unix style pattern matching
import glob #module is used to retrieve files/pathnames matching a specified pattern
from yattag import Doc, indent
import argparse, hashlib, os, subprocess, sys, time
parser = argparse.ArgumentParser(prog='ena-metadata-xml-generator.py', formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
+ =================================================================================================================================== +
| European Nucleotide Archive (ENA) Analysis Submission Tool |
| |
| Tool to register study and sample metadata to an ENA project, mainly in the drag and drop tool context. |
|example: python3 metadata_xml_generator.py -u Webin-### -p 'password' -f <dir to the spreadsheet> -a <add/modify> -t <for test server|
+ =================================================================================================================================== +
""")
parser.add_argument('-u', '--username', help='Webin submission account username (e.g. Webin-XXXXX)', type=str, required=True)
parser.add_argument('-p', '--password', help='password for Webin submission account', type=str, required=True)
parser.add_argument('-t', '--test', help='Specify whether to use ENA test server for submission', action='store_true')
parser.add_argument('-f', '--file', help='path for the metadata spreadsheet', type=str, required=True)
parser.add_argument('-a', '--action', help='Specify the type of action needed ( ADD or MODIFY)', type=str, required=True)
args = parser.parse_args()
os.listdir(".") #list files and dirs in wd - make sure you are in the one where the user metadata spreadsheet will be found
files_xlsx = glob.glob(args.file) #should we accept other spreadsheet extensions?
"""
General trimming to the metadata in the spreadsheet and save it in a panda dataframe object
"""
def trimming_the_spreadsheet(df):
trimmed_df = df.iloc[3: ,].copy()
trimmed_df.insert(6,"submission_tool",'drag and drop uploader tool',allow_duplicates=True) #study #to inject constant into trimmed df
trimmed_df.insert(24,"submission_tool",'drag and drop uploader tool',allow_duplicates=True) #sample
trimmed_df.insert(26,"sample capture status",'active surveillance in response to outbreak',allow_duplicates=False)
    trimmed_df.rename(columns={'collecting institute': 'collecting institution'}, inplace=True)  # temporary fix for the 'collecting institute' header error
trimmed_df["release_date"] = pd.to_datetime(trimmed_df["release_date"], errors='coerce').dt.strftime("%Y-%m-%d")
trimmed_df["collection date"] = pd.to_datetime(trimmed_df["collection date"], errors='coerce').dt.strftime("%Y-%m-%d")
trimmed_df["receipt date"] = pd.to_datetime(trimmed_df["receipt date"], errors='coerce').dt.strftime("%Y-%m-%d")
trimmed_df['collection date'] = trimmed_df['collection date'].fillna('not provided')
return trimmed_df
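# Illustrative sketch only (hypothetical usage, not executed here): the trimmed frame
# is typically produced from the user spreadsheet before the XML generators run, e.g.
#   raw_df = pd.read_excel(files_xlsx[0], header=0)   # header row index is an assumption
#   trimmed_df = trimming_the_spreadsheet(raw_df)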
"""
Write the pandas DataFrame object to the study XML file
"""
def study_xml_generator(df):
doc, tag, text = Doc().tagtext()
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
df = df.loc[3: ,'study_alias':'release_date'] # trim the dataframe to the study section only
df = df.iloc[:, :-1]
modified_df = df.where(pd.notnull(df), None) # replace the nan with none values
doc.asis(xml_header)
with tag('STUDY_SET'):
for item in modified_df.to_dict('records'):
if item['study_alias'] != None:
cleaned_item_dict = {k: v for k, v in item.items() if v not in [None, ' ']} # remove all the none and " " values
with tag('STUDY', alias=cleaned_item_dict['study_alias']):
with tag('DESCRIPTOR'):
with tag("STUDY_TITLE"):
text(cleaned_item_dict['study_name'])
doc.stag('STUDY_TYPE', existing_study_type="Other")
with tag('STUDY_ABSTRACT'):
text(cleaned_item_dict['abstract'])
with tag('CENTER_PROJECT_NAME'):
text(cleaned_item_dict['short_description'])
with tag('STUDY_ATTRIBUTES'):
for header, object in cleaned_item_dict.items():
if header not in ['study_alias', 'email_address', 'center_name', 'study_name',
'short_description', 'abstract']:
with tag("STUDY_ATTRIBUTE"):
with tag("TAG"):
text(header)
with tag("VALUE"):
text(object)
result_study = indent(
doc.getvalue(),
indent_text=False
)
with open("study.xml", "w") as f:
f.write(result_study)
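# For reference, a sketch of the element shape emitted above (illustrative values,
# not verified output): every spreadsheet column outside the excluded list becomes
#   <STUDY_ATTRIBUTE><TAG>release_date</TAG><VALUE>2021-06-18</VALUE></STUDY_ATTRIBUTE>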
"""
Write the pandas DataFrame object to the sample XML file
"""
def sample_xml_generator(df):
doc, tag, text = Doc().tagtext()
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
df = df.loc[3:, 'sample_alias':'experiment_name'] # trim the dataframe to the sample section including the "experiment name" to include any user defined fields
df = df.iloc[:, :-1] # remove the last column in the trimmed dataframe ( the "experiment name" column)
    modified_df = df.where(pd.notnull(df), None)  # replace the nan with none values
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
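# The column-comparison tests below share one pattern (a sketch built only from the
# calls already used in this file):
#   df = pd.read_csv(io.StringIO(data), sep="|")               # pipe-delimited fixture
#   actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
#   assert_series_equal(df["expected"], actual_out, check_names=False)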
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
    assert_series_equal(expect_out, actual_out, check_names=False)
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7, hour=23,
minute=59, second=59)
self.assertEqual(ival_W.asfreq('A'), ival_W_to_A)
self.assertEqual(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
self.assertEqual(ival_W.asfreq('Q'), ival_W_to_Q)
self.assertEqual(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
self.assertEqual(ival_W.asfreq('M'), ival_W_to_M)
self.assertEqual(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
self.assertEqual(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
self.assertEqual(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
self.assertEqual(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
self.assertEqual(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
self.assertEqual(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
self.assertEqual(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
self.assertEqual(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
self.assertEqual(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
self.assertEqual(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
self.assertEqual(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
self.assertEqual(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
self.assertEqual(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
self.assertEqual(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
self.assertEqual(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
self.assertEqual(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
self.assertEqual(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
self.assertEqual(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
self.assertEqual(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
self.assertEqual(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
self.assertEqual(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
self.assertEqual(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
self.assertEqual(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
self.assertEqual(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
self.assertEqual(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
self.assertEqual(ival_W.asfreq('W'), ival_W)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
ival_W.asfreq('WK')
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=1)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-SAT', year=2007, month=1, day=6)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-FRI', year=2007, month=1, day=5)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-THU', year=2007, month=1, day=4)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-WED', year=2007, month=1, day=3)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-TUE', year=2007, month=1, day=2)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-MON', year=2007, month=1, day=1)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
self.assertEqual(ival_B.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B.asfreq('D'), ival_B_to_D)
self.assertEqual(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
self.assertEqual(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
self.assertEqual(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
self.assertEqual(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
self.assertEqual(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
self.assertEqual(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
self.assertEqual(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
self.assertEqual(ival_D.asfreq('A'), ival_D_to_A)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
self.assertEqual(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
self.assertEqual(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
self.assertEqual(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
self.assertEqual(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
self.assertEqual(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
self.assertEqual(ival_D.asfreq('M'), ival_D_to_M)
self.assertEqual(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
self.assertEqual(ival_D.asfreq('W'), ival_D_to_W)
self.assertEqual(ival_D_end_of_week.asfreq('W'), ival_D_to_W)
self.assertEqual(ival_D_friday.asfreq('B'), ival_B_friday)
self.assertEqual(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
self.assertEqual(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
self.assertEqual(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
self.assertEqual(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
self.assertEqual(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
self.assertEqual(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
self.assertEqual(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
self.assertEqual(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
self.assertEqual(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
self.assertEqual(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
self.assertEqual(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=59, second=59)
self.assertEqual(ival_H.asfreq('A'), ival_H_to_A)
self.assertEqual(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
self.assertEqual(ival_H.asfreq('Q'), ival_H_to_Q)
self.assertEqual(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
self.assertEqual(ival_H.asfreq('M'), ival_H_to_M)
self.assertEqual(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
self.assertEqual(ival_H.asfreq('W'), ival_H_to_W)
self.assertEqual(ival_H_end_of_week.asfreq('W'), ival_H_to_W)
self.assertEqual(ival_H.asfreq('D'), ival_H_to_D)
self.assertEqual(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
self.assertEqual(ival_H.asfreq('B'), ival_H_to_B)
self.assertEqual(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
self.assertEqual(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
self.assertEqual(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
self.assertEqual(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
self.assertEqual(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
self.assertEqual(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=59)
self.assertEqual(ival_T.asfreq('A'), ival_T_to_A)
self.assertEqual(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
self.assertEqual(ival_T.asfreq('Q'), ival_T_to_Q)
self.assertEqual(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
self.assertEqual(ival_T.asfreq('M'), ival_T_to_M)
self.assertEqual(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
self.assertEqual(ival_T.asfreq('W'), ival_T_to_W)
self.assertEqual(ival_T_end_of_week.asfreq('W'), ival_T_to_W)
self.assertEqual(ival_T.asfreq('D'), ival_T_to_D)
self.assertEqual(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
self.assertEqual(ival_T.asfreq('B'), ival_T_to_B)
self.assertEqual(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
self.assertEqual(ival_T.asfreq('H'), ival_T_to_H)
self.assertEqual(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
self.assertEqual(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
self.assertEqual(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
self.assertEqual(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1, hour=0, minute=0,
second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
self.assertEqual(ival_S.asfreq('A'), ival_S_to_A)
self.assertEqual(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
self.assertEqual(ival_S.asfreq('Q'), ival_S_to_Q)
self.assertEqual(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
self.assertEqual(ival_S.asfreq('M'), ival_S_to_M)
self.assertEqual(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
self.assertEqual(ival_S.asfreq('W'), ival_S_to_W)
self.assertEqual(ival_S_end_of_week.asfreq('W'), ival_S_to_W)
self.assertEqual(ival_S.asfreq('D'), ival_S_to_D)
self.assertEqual(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
self.assertEqual(ival_S.asfreq('B'), ival_S_to_B)
self.assertEqual(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
self.assertEqual(ival_S.asfreq('H'), ival_S_to_H)
self.assertEqual(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
self.assertEqual(ival_S.asfreq('Min'), ival_S_to_T)
self.assertEqual(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
self.assertEqual(ival_S.asfreq('S'), ival_S)
def test_asfreq_mult(self):
# normal freq to mult freq
p = Period(freq='A', year=2007)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('2007', freq='3A')
# This script takes the data scraped from both the Reddit API and the Coingecko API,
# then transforms and processes it into a unified dataframe that is finally
# fed to the LSTM neural network to make the predictions.
# Import libraries
import pandas as pd
import transformers
from transformers import pipeline
# Importing Data
# Importing Reddit posts
df_posts = pd.read_csv(
"C:/Users/rober/Desktop/MIS COSAS/DSTI MASTER/SUBJECTS/PYTHON LABS/Crypto Trading Bot/Crypto_Trading_Bot-main/Crypto_Trading_Bot-main/Reddit_Scrapper/subreddit-comments-dl/dataset/finals/concat_file.csv")
#print(df_posts.head())
#print(df_posts.info())
# Importing financial data of cryptocurrencies (prices, volumes)
df_prices = pd.read_csv(
    "C:/Users/rober/Desktop/MIS COSAS/DSTI MASTER/SUBJECTS/PYTHON LABS/Crypto Trading Bot/prices.csv")
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
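    # Illustrative aside (not part of the original test suite): NaT follows the
    # same comparison semantics as NaN, which is what the test above verifies:
    #   >>> np.nan == np.nan, pd.NaT == pd.NaT
    #   (False, False)
    #   >>> np.nan != np.nan, pd.NaT != pd.NaT
    #   (True, True)
    #   >>> np.nan < 1.0, pd.NaT < pd.Timestamp("2014-01-01")
    #   (False, False)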
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta64
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta64 with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta64 ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
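        # np.subtract with out= exercises the same in-place subtraction through
        # numpy's ufunc protocol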
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
    # TODO: A couple of other tests belong in this section. Move them in
    # a PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
        tm.assert_index_equal(result, expected)
""" Contains unit tests for the Metafeatures class. """
import inspect
import json
import jsonschema
import os
import random
import time
import unittest
import pandas as pd
import numpy as np
from metalearn import Metafeatures, METAFEATURES_JSON_SCHEMA_PATH
import metalearn.metafeatures.constants as consts
from tests.config import CORRECTNESS_SEED, METADATA_PATH
from tests.data.dataset import read_dataset
from tests.data.compute_dataset_metafeatures import get_dataset_metafeatures_path
FAIL_MESSAGE = "message"
FAIL_REPORT = "report"
TEST_NAME = "test_name"
class MetafeaturesWithDataTestCase(unittest.TestCase):
""" Contains tests for Metafeatures that require loading data first. """
def setUp(self):
self.datasets = {}
with open(METADATA_PATH, "r") as fh:
dataset_descriptions = json.load(fh)
for dataset_description in dataset_descriptions:
X, Y, column_types = read_dataset(dataset_description)
filename = dataset_description["filename"]
known_dataset_metafeatures_path = get_dataset_metafeatures_path(
filename
)
if os.path.exists(known_dataset_metafeatures_path):
with open(known_dataset_metafeatures_path) as fh:
metafeatures = json.load(fh)
self.datasets[filename] = {
"X": X, "Y": Y, "column_types": column_types,
"known_metafeatures": metafeatures,
"known_metafeatures_path": known_dataset_metafeatures_path
}
else:
raise FileNotFoundError(f"{known_dataset_metafeatures_path} does not exist")
def tearDown(self):
del self.datasets
def _report_test_failures(self, test_failures, test_name):
if test_failures != {}:
report_path = f"./failures_{test_name}.json"
with open(report_path, "w") as fh:
json.dump(test_failures, fh, indent=4)
message = next(iter(test_failures.values()))[FAIL_MESSAGE]
self.fail(
f"{message} Details have been written in {report_path}."
)
def _check_correctness(self, computed_mfs, known_mfs, filename):
"""
Tests whether computed_mfs are close to previously computed metafeature
values. This assumes that the previously computed values are correct
and allows testing for changes in metafeature computation. Only checks
the correctness of the metafeatures passed in--does not test that all
computable metafeatures were computed.
"""
test_failures = {}
fail_message = "Not all metafeatures matched previous results."
for mf_id, result in computed_mfs.items():
computed_value = result[consts.VALUE_KEY]
if not any(isinstance(computed_value, type_) for type_ in [str, float, int]):
self.fail(
'computed {} has invalid value {} with type {}'.format(mf_id, computed_value, type(computed_value))
)
known_value = known_mfs[mf_id][consts.VALUE_KEY]
correct = True
if known_value is None:
correct = False
elif type(known_value) is str:
correct = known_value == computed_value
else:
correct = np.array(np.isclose(known_value, computed_value, equal_nan=True)).all()
if not correct:
test_failures[mf_id] = {
"known_value": known_value,
"computed_value": computed_value
}
return self._format_check_report(
"correctness", fail_message, test_failures, filename
)
def _format_check_report(
self, test_name, fail_message, test_failures, filename
):
if test_failures == {}:
return test_failures
else:
return {
filename: {
TEST_NAME: test_name,
FAIL_MESSAGE: fail_message,
FAIL_REPORT: test_failures
}
}
def _check_compare_metafeature_lists(self, computed_mfs, known_mfs, filename):
"""
Tests whether computed_mfs matches the list of previously computed metafeature
names as well as the list of computable metafeatures in Metafeatures.list_metafeatures
"""
test_failures = {}
fail_message = "Metafeature lists do not match."
master_mf_ids_set = set(Metafeatures.IDS)
known_mf_ids_set = set({
x for x in known_mfs.keys() if "_Time" not in x
})
computed_mf_ids_set = set(computed_mfs.keys())
intersect_mf_ids_set = master_mf_ids_set.intersection(known_mf_ids_set
).intersection(computed_mf_ids_set)
master_diffs = master_mf_ids_set - intersect_mf_ids_set
if len(master_diffs) > 0:
test_failures["master_differences"] = list(master_diffs)
known_diffs = known_mf_ids_set - intersect_mf_ids_set
if len(known_diffs) > 0:
test_failures["known_differences"] = list(known_diffs)
computed_diffs = computed_mf_ids_set - intersect_mf_ids_set
if len(computed_diffs) > 0:
test_failures["computed_differences"] = list(computed_diffs)
return self._format_check_report(
"metafeature_lists", fail_message, test_failures, filename
)
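    # Runs each (check_function, args) pair in order and returns the first
    # non-empty failure report; an empty dict means every check passed.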
def _perform_checks(self, functions):
check = {}
for function, args in functions:
check = function(*args)
if check != {}:
break
return check
def test_run_without_exception(self):
try:
for dataset_filename, dataset in self.datasets.items():
Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"],
column_types=dataset["column_types"]
)
except Exception as e:
exc_type = type(e).__name__
self.fail(f"computing metafeatures raised {exc_type} unexpectedly")
def test_correctness(self):
"""Tests that metafeatures are computed correctly, for known datasets.
"""
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
computed_mfs = Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"], seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
known_mfs = dataset["known_metafeatures"]
required_checks = [
(self._check_correctness,
[computed_mfs, known_mfs, dataset_filename]),
(self._check_compare_metafeature_lists,
[computed_mfs, known_mfs, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_individual_metafeature_correctness(self):
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
known_mfs = dataset["known_metafeatures"]
for mf_id in Metafeatures.IDS:
computed_mfs = Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"], seed=CORRECTNESS_SEED,
metafeature_ids=[mf_id],
column_types=dataset["column_types"]
)
required_checks = [
(self._check_correctness,
[computed_mfs, known_mfs, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_no_targets(self):
""" Test Metafeatures().compute() without targets
"""
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
metafeatures = Metafeatures()
computed_mfs = metafeatures.compute(
X=dataset["X"], Y=None, seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
known_mfs = dataset["known_metafeatures"]
target_dependent_metafeatures = Metafeatures.list_metafeatures(
consts.MetafeatureGroup.TARGET_DEPENDENT.value
)
for mf_name in target_dependent_metafeatures:
known_mfs[mf_name] = {
consts.VALUE_KEY: consts.NO_TARGETS,
consts.COMPUTE_TIME_KEY: 0.
}
required_checks = [
(self._check_correctness,
[computed_mfs, known_mfs, dataset_filename]),
(self._check_compare_metafeature_lists,
[computed_mfs, known_mfs, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_numeric_targets(self):
""" Test Metafeatures().compute() with numeric targets
"""
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
metafeatures = Metafeatures()
column_types = dataset["column_types"].copy()
column_types[dataset["Y"].name] = consts.NUMERIC
computed_mfs = metafeatures.compute(
X=dataset["X"], Y=pd.Series(np.random.rand(dataset["Y"].shape[0]),
name=dataset["Y"].name), seed=CORRECTNESS_SEED,
column_types=column_types
)
known_mfs = dataset["known_metafeatures"]
target_dependent_metafeatures = Metafeatures.list_metafeatures(
consts.MetafeatureGroup.TARGET_DEPENDENT.value
)
for mf_name in target_dependent_metafeatures:
known_mfs[mf_name] = {
consts.VALUE_KEY: consts.NUMERIC_TARGETS,
consts.COMPUTE_TIME_KEY: 0.
}
required_checks = [
(self._check_correctness,
[computed_mfs, known_mfs, dataset_filename]),
(self._check_compare_metafeature_lists,
[computed_mfs, known_mfs, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_request_metafeatures(self):
SUBSET_LENGTH = 20
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
metafeature_ids = random.sample(Metafeatures.IDS, SUBSET_LENGTH)
computed_mfs = Metafeatures().compute(
X=dataset["X"],Y=dataset["Y"], seed=CORRECTNESS_SEED,
metafeature_ids=metafeature_ids,
column_types=dataset["column_types"]
)
known_metafeatures = dataset["known_metafeatures"]
required_checks = [
(self._check_correctness,
[computed_mfs, known_metafeatures, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
self.assertEqual(
set(metafeature_ids), set(computed_mfs.keys()),
"Compute did not return requested metafeatures"
)
self._report_test_failures(test_failures, test_name)
def test_exclude_metafeatures(self):
SUBSET_LENGTH = 20
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
metafeature_ids = random.sample(Metafeatures.IDS, SUBSET_LENGTH)
computed_mfs = Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"], seed=CORRECTNESS_SEED,
exclude=metafeature_ids,
column_types=dataset["column_types"]
)
known_metafeatures = dataset["known_metafeatures"]
required_checks = [
(self._check_correctness,
[computed_mfs, known_metafeatures, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
if any(mf_id in computed_mfs.keys() for mf_id in metafeature_ids):
                self.fail("Metafeatures computed an excluded metafeature")
self._report_test_failures(test_failures, test_name)
def test_request_metafeature_groups(self):
SUBSET_LENGTH = 3
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
groups = random.sample([group.value for group in consts.MetafeatureGroup], SUBSET_LENGTH)
computed_mfs = Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"], column_types=dataset["column_types"], seed=CORRECTNESS_SEED,
groups=groups,
)
known_metafeatures = dataset["known_metafeatures"]
required_checks = [
(self._check_correctness, [computed_mfs, known_metafeatures, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
metafeature_ids = set(mf_id for group in groups for mf_id in Metafeatures.list_metafeatures(group))
self.assertEqual(
metafeature_ids, set(computed_mfs.keys()), 'Compute did not return requested metafeatures'
)
self._report_test_failures(test_failures, test_name)
def test_exclude_metafeature_groups(self):
SUBSET_LENGTH = 3
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
groups = random.sample([group.value for group in consts.MetafeatureGroup], SUBSET_LENGTH)
computed_mfs = Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"], column_types=dataset["column_types"], seed=CORRECTNESS_SEED,
exclude_groups=groups,
)
known_metafeatures = dataset["known_metafeatures"]
required_checks = [
(self._check_correctness, [computed_mfs, known_metafeatures, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
metafeature_ids = set(mf_id for group in groups for mf_id in Metafeatures.list_metafeatures(group))
if any(mf_id in computed_mfs.keys() for mf_id in metafeature_ids):
self.fail('Metafeatures computed an excluded metafeature')
self._report_test_failures(test_failures, test_name)
def test_compute_effects_on_dataset(self):
"""
Tests whether computing metafeatures has any side effects on the input
X or Y data. Fails if there are any side effects.
"""
for dataset in self.datasets.values():
X_copy, Y_copy = dataset["X"].copy(), dataset["Y"].copy()
Metafeatures().compute(
X=dataset["X"],Y=dataset["Y"],
column_types=dataset["column_types"]
)
if not (
X_copy.equals(dataset["X"]) and Y_copy.equals(dataset["Y"])
):
                self.fail("Input data has changed after Metafeatures.compute")
def test_compute_effects_on_compute(self):
"""
Tests whether computing metafeatures has any side effects on the
instance metafeatures object. Fails if there are any side effects.
"""
required_checks = []
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
metafeatures_instance = Metafeatures()
# first run
metafeatures_instance.compute(
X=dataset["X"],Y=dataset["Y"],seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
# second run
computed_mfs = metafeatures_instance.compute(
X=dataset["X"],Y=dataset["Y"],seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
known_mfs = dataset["known_metafeatures"]
required_checks.append(
(self._check_correctness,
[computed_mfs, known_mfs, dataset_filename])
)
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_output_format(self):
with open(METAFEATURES_JSON_SCHEMA_PATH) as f:
mf_schema = json.load(f)
for dataset_filename, dataset in self.datasets.items():
computed_mfs = Metafeatures().compute(
X=dataset["X"],Y=dataset["Y"],
column_types=dataset["column_types"]
)
try:
jsonschema.validate(computed_mfs, mf_schema)
except jsonschema.exceptions.ValidationError as e:
self.fail(
f"Metafeatures computed from {dataset_filename} do not "+
"conform to schema"
)
def test_output_json_compatibility(self):
with open(METAFEATURES_JSON_SCHEMA_PATH) as f:
mf_schema = json.load(f)
for dataset_filename, dataset in self.datasets.items():
computed_mfs = Metafeatures().compute(
X=dataset["X"],Y=dataset["Y"],
column_types=dataset["column_types"]
)
try:
json_computed_mfs = json.dumps(computed_mfs)
except Exception as e:
self.fail(
f"Failed to convert metafeature output to json: {str(e)}"
)
def test_soft_timeout(self):
"""Tests Metafeatures().compute() with timeout set"""
test_name = inspect.stack()[0][3]
test_failures = {}
for dataset_filename, dataset in self.datasets.items():
metafeatures = Metafeatures()
start_time = time.time()
metafeatures.compute(
X=dataset["X"], Y=dataset["Y"], seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
full_compute_time = time.time() - start_time
start_time = time.time()
computed_mfs = metafeatures.compute(
X=dataset["X"], Y=dataset["Y"], seed=CORRECTNESS_SEED,
column_types=dataset["column_types"], timeout=full_compute_time/2
)
limited_compute_time = time.time() - start_time
self.assertGreater(
full_compute_time, limited_compute_time,
f"Compute metafeatures exceeded timeout on '{dataset_filename}'"
)
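            # keep only the metafeatures that finished before the timeout;
            # only those values can be compared against the known results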
computed_mfs_timeout = {k: v for k, v in computed_mfs.items()
if v[consts.VALUE_KEY] != consts.TIMEOUT}
known_mfs = dataset["known_metafeatures"]
required_checks = [
(self._check_correctness,
[computed_mfs_timeout, known_mfs, dataset_filename]),
(self._check_compare_metafeature_lists,
[computed_mfs, known_mfs, dataset_filename])
]
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
class MetafeaturesTestCase(unittest.TestCase):
""" Contains tests for Metafeatures that can be executed without loading data. """
def setUp(self):
self.dummy_features = pd.DataFrame(np.random.rand(50, 50))
self.dummy_target = pd.Series(np.random.randint(2, size=50), name="target").astype("str")
self.invalid_requested_metafeature_message_start = "One or more requested metafeatures are not valid:"
self.invalid_excluded_metafeature_message_start = "One or more excluded metafeatures are not valid:"
self.invalid_metafeature_message_start_fail_message = "Error message indicating invalid metafeatures did not start with expected string."
self.invalid_metafeature_message_contains_fail_message = "Error message indicating invalid metafeatures should include names of invalid features."
def test_dataframe_input_error(self):
""" Tests if `compute` gives a user-friendly error when a TypeError or ValueError occurs. """
expected_error_message1 = "X must be of type pandas.DataFrame"
fail_message1 = "We expect a user friendly message when the features passed to compute is not a Pandas.DataFrame."
expected_error_message2 = "X must not be empty"
fail_message2 = "We expect a user friendly message when the features passed to compute are empty."
expected_error_message3 = "Y must be of type pandas.Series"
fail_message3 = "We expect a user friendly message when the target column passed to compute is not a Pandas.Series."
expected_error_message4 = "Y must have the same number of rows as X"
fail_message4 = "We expect a user friendly message when the target column passed to compute has a number of rows different than X's."
# We don't check for the Type of TypeError explicitly as any other error would fail the unit test.
with self.assertRaises(TypeError) as cm:
Metafeatures().compute(X=None, Y=self.dummy_target)
self.assertEqual(str(cm.exception), expected_error_message1, fail_message1)
with self.assertRaises(TypeError) as cm:
Metafeatures().compute(X=np.zeros((500, 50)), Y=pd.Series(np.zeros(500)))
self.assertEqual(str(cm.exception), expected_error_message1, fail_message1)
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=pd.DataFrame(np.zeros((0, 50))), Y=pd.Series(np.zeros(500)))
self.assertEqual(str(cm.exception), expected_error_message2, fail_message2)
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=pd.DataFrame(np.zeros((500, 0))), Y=pd.Series(np.zeros(500)))
self.assertEqual(str(cm.exception), expected_error_message2, fail_message2)
with self.assertRaises(TypeError) as cm:
Metafeatures().compute(X=pd.DataFrame(np.zeros((500, 50))), Y=np.random.randint(2, size=500).astype("str"))
self.assertEqual(str(cm.exception), expected_error_message3, fail_message3)
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=pd.DataFrame(np.zeros((500, 50))), Y=pd.Series(np.random.randint(2, size=0), name="target").astype("str"))
self.assertEqual(str(cm.exception), expected_error_message4, fail_message4)
def _check_invalid_metafeature_exception_string(self, exception_str, expected_str, invalid_metafeatures):
""" Checks if the exception message starts with the right string, and contains all of the invalid metafeatures expected. """
self.assertTrue(
exception_str.startswith(expected_str),
self.invalid_metafeature_message_start_fail_message
)
for invalid_mf in invalid_metafeatures:
self.assertTrue(
invalid_mf in exception_str,
self.invalid_metafeature_message_contains_fail_message
)
def test_metafeatures_input_all_invalid(self):
""" Test cases where all requested and excluded metafeatures are invalid. """
invalid_metafeatures = ["ThisIsNotValid", "ThisIsAlsoNotValid"]
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target, metafeature_ids=invalid_metafeatures)
self._check_invalid_metafeature_exception_string(str(cm.exception),
self.invalid_requested_metafeature_message_start,
invalid_metafeatures)
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target, exclude=invalid_metafeatures)
self._check_invalid_metafeature_exception_string(str(cm.exception),
self.invalid_excluded_metafeature_message_start,
invalid_metafeatures)
def test_metafeatures_input_partial_invalid(self):
""" Test case where only some requested and excluded metafeatures are invalid. """
invalid_metafeatures = ["ThisIsNotValid", "ThisIsAlsoNotValid"]
valid_metafeatures = ["NumberOfInstances", "NumberOfFeatures"]
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target,
metafeature_ids=invalid_metafeatures + valid_metafeatures)
self._check_invalid_metafeature_exception_string(str(cm.exception),
self.invalid_requested_metafeature_message_start,
invalid_metafeatures)
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target,
exclude=invalid_metafeatures + valid_metafeatures)
self._check_invalid_metafeature_exception_string(str(cm.exception),
self.invalid_excluded_metafeature_message_start,
invalid_metafeatures)
# Order should not matter
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target,
metafeature_ids=valid_metafeatures + invalid_metafeatures)
self._check_invalid_metafeature_exception_string(str(cm.exception),
self.invalid_requested_metafeature_message_start,
invalid_metafeatures)
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target,
exclude=valid_metafeatures + invalid_metafeatures)
self._check_invalid_metafeature_exception_string(str(cm.exception),
self.invalid_excluded_metafeature_message_start,
invalid_metafeatures)
def test_request_and_exclude_metafeatures(self):
expected_exception_string = "metafeature_ids and exclude cannot both be non-null"
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target,
metafeature_ids=[], exclude=[])
self.assertEqual(str(cm.exception), expected_exception_string)
def test_request_and_exclude_metafeature_groups(self):
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target, groups=[], exclude_groups=[])
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target, groups=['foobar'])
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target, exclude_groups=['foobar'])
def test_column_type_input(self):
column_types = {col: consts.NUMERIC for col in self.dummy_features.columns}
column_types[self.dummy_features.columns[2]] = consts.CATEGORICAL
column_types[self.dummy_target.name] = consts.CATEGORICAL
# all valid
try:
Metafeatures().compute(
self.dummy_features, self.dummy_target, column_types
)
except Exception as e:
exc_type = type(e).__name__
self.fail(f"computing metafeatures raised {exc_type} unexpectedly")
# some valid
column_types[self.dummy_features.columns[0]] = "NUMBER"
column_types[self.dummy_features.columns[1]] = "CATEGORY"
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(
self.dummy_features, self.dummy_target, column_types
)
self.assertTrue(
str(cm.exception).startswith(
"Invalid column types:"
),
"Some invalid column types test failed"
)
# all invalid
column_types = {feature: "INVALID_TYPE" for feature in self.dummy_features.columns}
column_types[self.dummy_target.name] = "INVALID"
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(
self.dummy_features, self.dummy_target, column_types
)
self.assertTrue(
str(cm.exception).startswith(
"Invalid column types:"
),
"All invalid column types test failed"
)
# invalid number of column types
del column_types[self.dummy_features.columns[0]]
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(
self.dummy_features, self.dummy_target, column_types
)
self.assertTrue(
str(cm.exception).startswith(
"Column type not specified for column"
),
"Invalid number of column types test failed"
)
def test_sampling_shape_no_exception(self):
try:
Metafeatures().compute(
self.dummy_features, self.dummy_target, sample_shape=(10,10)
)
except Exception as e:
exc_type = type(e).__name__
self.fail(f"computing metafeatures raised {exc_type} unexpectedly")
def test_sampling_shape_correctness(self):
sample_shape = (7,13)
metafeatures = Metafeatures()
dummy_mf_df = metafeatures.compute(
self.dummy_features, self.dummy_target, sample_shape=sample_shape
)
X_sample = metafeatures._resources["XSample"]["value"]
self.assertEqual(
X_sample.shape, sample_shape,
f"Sampling produced incorrect shape {X_sample.shape}; should have" +
f" been {sample_shape}."
)
def test_sampling_shape_invalid_input(self):
error_tests = [
{
"sample_shape": "bad_shape",
"message": "`sample_shape` must be of type `tuple` or `list`"
},
{
"sample_shape": {0:"bad", 1:"shape"},
"message": "`sample_shape` must be of type `tuple` or `list`"
},
{
"sample_shape": (2,2,2),
"message": "`sample_shape` must be of length 2"
},
{
"sample_shape": [1],
"message": "`sample_shape` must be of length 2"
},
{
"sample_shape": (0,1),
"message": "Cannot sample less than one row"
},
{
"sample_shape": (1,0),
"message": "Cannot sample less than 1 column"
},
{
"sample_shape": (3,10),
# 4 based on self.dummy_target
"message": "Cannot sample less than 4 rows from Y"
}
]
for test in error_tests:
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(
self.dummy_features, self.dummy_target,
sample_shape=test["sample_shape"]
)
self.assertEqual(
str(cm.exception),
test["message"]
)
def test_n_folds_invalid_input(self):
tests = [
{
"n_folds": 0,
"message": "`n_folds` must be >= 2, but was 0"
},
{
"n_folds": 1,
"message": "`n_folds` must be >= 2, but was 1"
},
{
"n_folds": 2.1,
"message": "`n_folds` must be an integer, not 2.1"
},
{
"n_folds": "hello",
"message": "`n_folds` must be an integer, not hello"
},
{
"n_folds": [3],
"message": "`n_folds` must be an integer, not [3]"
},
{
"n_folds": {5:7},
"message": "`n_folds` must be an integer, not {5: 7}"
}
]
for test in tests:
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(
self.dummy_features, self.dummy_target,
n_folds=test["n_folds"]
)
self.assertEqual(str(cm.exception), test["message"])
def test_n_folds_with_small_dataset(self):
# should raise error with small (few instances) dataset
# unless not computing landmarking mfs
X_small = pd.DataFrame(np.random.rand(3, 7))
Y_small = pd.Series([0,1,0], name="target").astype("str")
metafeatures = Metafeatures()
with self.assertRaises(ValueError) as cm:
metafeatures.compute(X_small, Y_small, n_folds=2)
self.assertEqual(
str(cm.exception),
"The minimum number of instances in each class of Y is n_folds=2." +
" Class 1 has 1."
)
def test_n_folds_with_small_dataset_no_landmarkers(self):
# should raise error with small (few instances) dataset
# unless not computing landmarking mfs
X_small = pd.DataFrame(np.random.rand(3, 7))
        Y_small = pd.Series([0,1,0], name="target").astype("str")
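        # Hedged completion (the original test body is truncated): by symmetry
        # with test_n_folds_with_small_dataset above, the intent is presumably
        # that skipping the landmarking metafeatures lets the tiny dataset pass.
        # `MetafeatureGroup.LANDMARKING` is assumed to exist in consts.
        non_landmarking_ids = [
            mf_id for mf_id in Metafeatures.IDS
            if mf_id not in Metafeatures.list_metafeatures(
                consts.MetafeatureGroup.LANDMARKING.value
            )
        ]
        try:
            Metafeatures().compute(
                X_small, Y_small, metafeature_ids=non_landmarking_ids, n_folds=2
            )
        except Exception as e:
            self.fail(f"computing metafeatures raised {type(e).__name__} unexpectedly")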
import pandas as pd
from sqlalchemy import create_engine, text
from datetime import date, datetime, timedelta
import concurrent.futures
import requests as rq
import time
import config
import traceback
pd.set_option('display.max_columns', None)
#pd.set_option('display.max_rows', None)
# Key
key = config.polygon_key
# Input
multiplier = 1
timespan = 'minute'
adjusted = 'true'
sort = 'desc'
limit = 50000
# Database / Get list of tickers
engine = create_engine(config.psql)
symbols_df = pd.read_sql_query('select ticker from companies where active = true', con=engine)
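# --- Hedged sketch: the script is truncated here. ---
# A typical continuation pulls minute aggregates for each ticker from Polygon's
# /v2/aggs endpoint and appends them to Postgres. The date window, the target
# table name 'minute_bars' and the worker count are assumptions, not the
# author's code.
def fetch_bars(ticker):
    start = date.today() - timedelta(days=7)
    end = date.today()
    url = (
        f"https://api.polygon.io/v2/aggs/ticker/{ticker}/range/"
        f"{multiplier}/{timespan}/{start}/{end}"
        f"?adjusted={adjusted}&sort={sort}&limit={limit}&apiKey={key}"
    )
    try:
        results = rq.get(url).json().get('results', [])
    except Exception:
        traceback.print_exc()
        return None
    if not results:
        return None
    df = pd.DataFrame(results)
    df['ticker'] = ticker
    # Polygon returns bar timestamps as epoch milliseconds in column 't'
    df['dt'] = pd.to_datetime(df['t'], unit='ms')
    return df

if __name__ == '__main__':
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        for bars in executor.map(fetch_bars, symbols_df['ticker']):
            if bars is not None:
                bars.to_sql('minute_bars', con=engine, if_exists='append', index=False)
            time.sleep(0.1)  # stay under the API rate limit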
from textwrap import dedent
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
option_context,
)
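# Styler rendering requires jinja2, so skip this whole test module when it is missing.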
pytest.importorskip("jinja2")
from pandas.io.formats.style import Styler
from pandas.io.formats.style_render import (
_parse_latex_cell_styles,
_parse_latex_css_conversion,
_parse_latex_header_span,
_parse_latex_table_styles,
_parse_latex_table_wrapping,
)
@pytest.fixture
def df():
return DataFrame({"A": [0, 1], "B": [-0.61, -1.22], "C": ["ab", "cd"]})
@pytest.fixture
def df_ext():
return DataFrame(
{"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]}
)
@pytest.fixture
def styler(df):
return Styler(df, uuid_len=0, precision=2)
def test_minimal_latex_tabular(styler):
expected = dedent(
"""\
\\begin{tabular}{lrrl}
& A & B & C \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
assert styler.to_latex() == expected
def test_tabular_hrules(styler):
expected = dedent(
"""\
\\begin{tabular}{lrrl}
\\toprule
& A & B & C \\\\
\\midrule
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\bottomrule
\\end{tabular}
"""
)
assert styler.to_latex(hrules=True) == expected
def test_tabular_custom_hrules(styler):
styler.set_table_styles(
[
{"selector": "toprule", "props": ":hline"},
{"selector": "bottomrule", "props": ":otherline"},
]
) # no midrule
expected = dedent(
"""\
\\begin{tabular}{lrrl}
\\hline
& A & B & C \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\otherline
\\end{tabular}
"""
)
assert styler.to_latex() == expected
def test_column_format(styler):
# default setting is already tested in `test_latex_minimal_tabular`
styler.set_table_styles([{"selector": "column_format", "props": ":cccc"}])
assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_format="rrrr")
styler.set_table_styles([{"selector": "column_format", "props": ":r|r|cc"}])
assert "\\begin{tabular}{r|r|cc}" in styler.to_latex()
def test_siunitx_cols(styler):
expected = dedent(
"""\
\\begin{tabular}{lSSl}
{} & {A} & {B} & {C} \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
assert styler.to_latex(siunitx=True) == expected
def test_position(styler):
assert "\\begin{table}[h!]" in styler.to_latex(position="h!")
assert "\\end{table}" in styler.to_latex(position="h!")
styler.set_table_styles([{"selector": "position", "props": ":b!"}])
assert "\\begin{table}[b!]" in styler.to_latex()
assert "\\end{table}" in styler.to_latex()
@pytest.mark.parametrize("env", [None, "longtable"])
def test_label(styler, env):
assert "\n\\label{text}" in styler.to_latex(label="text", environment=env)
styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}])
assert "\n\\label{more :text}" in styler.to_latex(environment=env)
def test_position_float_raises(styler):
msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering',"
with pytest.raises(ValueError, match=msg):
styler.to_latex(position_float="bad_string")
msg = "`position_float` cannot be used in 'longtable' `environment`"
with pytest.raises(ValueError, match=msg):
styler.to_latex(position_float="centering", environment="longtable")
@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")])
@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")])
@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")])
@pytest.mark.parametrize("column_format", [(None, ""), ("rcrl", "{tabular}{rcrl}")])
@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")])
def test_kwargs_combinations(
styler, label, position, caption, column_format, position_float
):
result = styler.to_latex(
label=label[0],
position=position[0],
caption=caption[0],
column_format=column_format[0],
position_float=position_float[0],
)
assert label[1] in result
assert position[1] in result
assert caption[1] in result
assert column_format[1] in result
assert position_float[1] in result
def test_custom_table_styles(styler):
styler.set_table_styles(
[
{"selector": "mycommand", "props": ":{myoptions}"},
{"selector": "mycommand2", "props": ":{myoptions2}"},
]
)
expected = dedent(
"""\
\\begin{table}
\\mycommand{myoptions}
\\mycommand2{myoptions2}
"""
)
assert expected in styler.to_latex()
def test_cell_styling(styler):
styler.highlight_max(props="itshape:;Huge:--wrap;")
expected = dedent(
"""\
\\begin{tabular}{lrrl}
& A & B & C \\\\
0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\
1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\
\\end{tabular}
"""
)
assert expected == styler.to_latex()
def test_multiindex_columns(df):
cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df.columns = cidx
expected = dedent(
"""\
\\begin{tabular}{lrrl}
& \\multicolumn{2}{r}{A} & B \\\\
& a & b & c \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
s = df.style.format(precision=2)
assert expected == s.to_latex()
# non-sparse
expected = dedent(
"""\
\\begin{tabular}{lrrl}
& A & A & B \\\\
& a & b & c \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
s = df.style.format(precision=2)
assert expected == s.to_latex(sparse_columns=False)
def test_multiindex_row(df_ext):
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index = ridx
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & A & B & C \\\\
\\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
& b & 1 & -1.22 & cd \\\\
B & c & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
styler = df_ext.style.format(precision=2)
result = styler.to_latex()
assert expected == result
# non-sparse
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & A & B & C \\\\
A & a & 0 & -0.61 & ab \\\\
A & b & 1 & -1.22 & cd \\\\
B & c & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
result = styler.to_latex(sparse_index=False)
assert expected == result
def test_multirow_naive(df_ext):
ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")])
df_ext.index = ridx
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & A & B & C \\\\
X & x & 0 & -0.61 & ab \\\\
& y & 1 & -1.22 & cd \\\\
Y & z & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
styler = df_ext.style.format(precision=2)
result = styler.to_latex(multirow_align="naive")
assert expected == result
def test_multiindex_row_and_col(df_ext):
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & \\multicolumn{2}{l}{Z} & Y \\\\
& & a & b & c \\\\
\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
& b & 1 & -1.22 & cd \\\\
B & c & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
styler = df_ext.style.format(precision=2)
result = styler.to_latex(multirow_align="b", multicol_align="l")
assert result == expected
# non-sparse
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & Z & Z & Y \\\\
& & a & b & c \\\\
A & a & 0 & -0.61 & ab \\\\
A & b & 1 & -1.22 & cd \\\\
B & c & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
result = styler.to_latex(sparse_index=False, sparse_columns=False)
assert result == expected
@pytest.mark.parametrize(
"multicol_align, siunitx, header",
[
("naive-l", False, " & A & &"),
("naive-r", False, " & & & A"),
("naive-l", True, "{} & {A} & {} & {}"),
("naive-r", True, "{} & {} & {} & {A}"),
],
)
def test_multicol_naive(df, multicol_align, siunitx, header):
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")])
df.columns = ridx
level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}"
col_format = "lrrl" if not siunitx else "lSSl"
expected = dedent(
f"""\
\\begin{{tabular}}{{{col_format}}}
{header} \\\\
{level1} \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{{tabular}}
"""
)
styler = df.style.format(precision=2)
result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx)
assert expected == result
def test_multi_options(df_ext):
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
styler = df_ext.style.format(precision=2)
expected = dedent(
"""\
& & \\multicolumn{2}{r}{Z} & Y \\\\
& & a & b & c \\\\
\\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
"""
)
result = styler.to_latex()
assert expected in result
with option_context("styler.latex.multicol_align", "l"):
assert " & & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex()
with option_context("styler.latex.multirow_align", "b"):
assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex()
def test_multiindex_columns_hidden():
df = DataFrame([[1, 2, 3, 4]])
df.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)])
s = df.style
assert "{tabular}{lrrrr}" in s.to_latex()
s.set_table_styles([]) # reset the position command
s.hide([("A", 2)], axis="columns")
assert "{tabular}{lrrr}" in s.to_latex()
@pytest.mark.parametrize(
"option, value",
[
("styler.sparse.index", True),
("styler.sparse.index", False),
("styler.sparse.columns", True),
("styler.sparse.columns", False),
],
)
def test_sparse_options(df_ext, option, value):
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
styler = df_ext.style
latex1 = styler.to_latex()
with option_context(option, value):
latex2 = styler.to_latex()
assert (latex1 == latex2) is value
def test_hidden_index(styler):
styler.hide(axis="index")
expected = dedent(
"""\
\\begin{tabular}{rrl}
A & B & C \\\\
0 & -0.61 & ab \\\\
1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
assert styler.to_latex() == expected
@pytest.mark.parametrize("environment", ["table", "figure*", None])
def test_comprehensive(df_ext, environment):
# test as many low level features simultaneously as possible
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
stlr = df_ext.style
stlr.set_caption("mycap")
stlr.set_table_styles(
[
{"selector": "label", "props": ":{fig§item}"},
{"selector": "position", "props": ":h!"},
{"selector": "position_float", "props": ":centering"},
{"selector": "column_format", "props": ":rlrlr"},
{"selector": "toprule", "props": ":toprule"},
{"selector": "midrule", "props": ":midrule"},
{"selector": "bottomrule", "props": ":bottomrule"},
{"selector": "rowcolors", "props": ":{3}{pink}{}"}, # custom command
]
)
stlr.highlight_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap")
stlr.highlight_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")])
expected = (
"""\
\\begin{table}[h!]
\\centering
\\caption{mycap}
\\label{fig:item}
\\rowcolors{3}{pink}{}
\\begin{tabular}{rlrlr}
\\toprule
& & \\multicolumn{2}{r}{Z} & Y \\\\
& & a & b & c \\\\
\\midrule
\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\
& b & 1 & -1.22 & cd \\\\
B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """
"""\
\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
).replace("table", environment if environment else "table")
result = stlr.format(precision=2).to_latex(environment=environment)
assert result == expected
def test_environment_option(styler):
with option_context("styler.latex.environment", "bar-env"):
assert "\\begin{bar-env}" in styler.to_latex()
assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env")
def test_parse_latex_table_styles(styler):
styler.set_table_styles(
[
{"selector": "foo", "props": [("attr", "value")]},
{"selector": "bar", "props": [("attr", "overwritten")]},
{"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]},
{"selector": "label", "props": [("", "{fig§item}")]},
]
)
assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz"
# test '§' replaced by ':' [for CSS compatibility]
assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}"
def test_parse_latex_cell_styles_basic(): # test nesting
cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")]
expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}"
assert _parse_latex_cell_styles(cell_style, "text") == expected
@pytest.mark.parametrize(
"wrap_arg, expected",
[ # test wrapping
("", "\\<command><options> <display_value>"),
("--wrap", "{\\<command><options> <display_value>}"),
("--nowrap", "\\<command><options> <display_value>"),
("--lwrap", "{\\<command><options>} <display_value>"),
("--dwrap", "{\\<command><options>}{<display_value>}"),
("--rwrap", "\\<command><options>{<display_value>}"),
],
)
def test_parse_latex_cell_styles_braces(wrap_arg, expected):
cell_style = [("<command>", f"<options>{wrap_arg}")]
assert _parse_latex_cell_styles(cell_style, "<display_value>") == expected
def test_parse_latex_header_span():
cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []}
expected = "\\multicolumn{3}{Y}{text}"
assert _parse_latex_header_span(cell, "X", "Y") == expected
cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []}
expected = "\\multirow[X]{5}{*}{text}"
assert _parse_latex_header_span(cell, "X", "Y") == expected
cell = {"display_value": "text", "cellstyle": []}
assert _parse_latex_header_span(cell, "X", "Y") == "text"
cell = {"display_value": "text", "cellstyle": [("bfseries", "--rwrap")]}
assert _parse_latex_header_span(cell, "X", "Y") == "\\bfseries{text}"
def test_parse_latex_table_wrapping(styler):
styler.set_table_styles(
[
{"selector": "toprule", "props": ":value"},
{"selector": "bottomrule", "props": ":value"},
{"selector": "midrule", "props": ":value"},
{"selector": "column_format", "props": ":value"},
]
)
assert _parse_latex_table_wrapping(styler.table_styles, styler.caption) is False
assert _parse_latex_table_wrapping(styler.table_styles, "some caption") is True
styler.set_table_styles(
[
{"selector": "not-ignored", "props": ":value"},
],
overwrite=False,
)
assert _parse_latex_table_wrapping(styler.table_styles, None) is True
def test_short_caption(styler):
result = styler.to_latex(caption=("full cap", "short cap"))
assert "\\caption[short cap]{full cap}" in result
@pytest.mark.parametrize(
"css, expected",
[
([("color", "red")], [("color", "{red}")]), # test color and input format types
(
[("color", "rgb(128, 128, 128 )")],
[("color", "[rgb]{0.502, 0.502, 0.502}")],
),
(
[("color", "rgb(128, 50%, 25% )")],
[("color", "[rgb]{0.502, 0.500, 0.250}")],
),
(
[("color", "rgba(128,128,128,1)")],
[("color", "[rgb]{0.502, 0.502, 0.502}")],
),
([("color", "#FF00FF")], [("color", "[HTML]{FF00FF}")]),
([("color", "#F0F")], [("color", "[HTML]{FF00FF}")]),
([("font-weight", "bold")], [("bfseries", "")]), # test font-weight and types
([("font-weight", "bolder")], [("bfseries", "")]),
([("font-weight", "normal")], []),
([("background-color", "red")], [("cellcolor", "{red}--lwrap")]),
(
[("background-color", "#FF00FF")], # test background-color command and wrap
[("cellcolor", "[HTML]{FF00FF}--lwrap")],
),
([("font-style", "italic")], [("itshape", "")]), # test font-style and types
([("font-style", "oblique")], [("slshape", "")]),
([("font-style", "normal")], []),
([("color", "red /*--dwrap*/")], [("color", "{red}--dwrap")]), # css comments
([("background-color", "red /* --dwrap */")], [("cellcolor", "{red}--dwrap")]),
],
)
def test_parse_latex_css_conversion(css, expected):
result = _parse_latex_css_conversion(css)
assert result == expected
@pytest.mark.parametrize(
"env, inner_env",
[
(None, "tabular"),
("table", "tabular"),
("longtable", "longtable"),
],
)
@pytest.mark.parametrize(
"convert, exp", [(True, "bfseries"), (False, "font-weightbold")]
)
def test_parse_latex_css_convert_minimal(styler, env, inner_env, convert, exp):
# parameters ensure longtable template is also tested
styler.highlight_max(props="font-weight:bold;")
result = styler.to_latex(convert_css=convert, environment=env)
expected = dedent(
f"""\
0 & 0 & \\{exp} -0.61 & ab \\\\
1 & \\{exp} 1 & -1.22 & \\{exp} cd \\\\
\\end{{{inner_env}}}
"""
)
assert expected in result
def test_parse_latex_css_conversion_option():
css = [("command", "option--latex--wrap")]
expected = [("command", "option--wrap")]
result = _parse_latex_css_conversion(css)
assert result == expected
def test_styler_object_after_render(styler):
# GH 42320
pre_render = styler._copy(deepcopy=True)
styler.to_latex(
column_format="rllr",
position="h",
position_float="centering",
hrules=True,
label="my lab",
caption="my cap",
)
assert pre_render.table_styles == styler.table_styles
assert pre_render.caption == styler.caption
def test_longtable_comprehensive(styler):
result = styler.to_latex(
environment="longtable", hrules=True, label="fig:A", caption=("full", "short")
)
expected = dedent(
"""\
\\begin{longtable}{lrrl}
\\caption[short]{full} \\label{fig:A} \\\\
\\toprule
& A & B & C \\\\
\\midrule
\\endfirsthead
\\caption[]{full} \\\\
\\toprule
& A & B & C \\\\
\\midrule
\\endhead
\\midrule
\\multicolumn{4}{r}{Continued on next page} \\\\
\\midrule
\\endfoot
\\bottomrule
\\endlastfoot
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{longtable}
"""
)
assert result == expected
def test_longtable_minimal(styler):
result = styler.to_latex(environment="longtable")
expected = dedent(
"""\
\\begin{longtable}{lrrl}
& A & B & C \\\\
\\endfirsthead
& A & B & C \\\\
\\endhead
\\multicolumn{4}{r}{Continued on next page} \\\\
\\endfoot
\\endlastfoot
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{longtable}
"""
)
assert result == expected
@pytest.mark.parametrize(
"sparse, exp, siunitx",
[
(True, "{} & \\multicolumn{2}{r}{A} & {B}", True),
(False, "{} & {A} & {A} & {B}", True),
(True, " & \\multicolumn{2}{r}{A} & B", False),
(False, " & A & A & B", False),
],
)
def test_longtable_multiindex_columns(df, sparse, exp, siunitx):
cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df.columns = cidx
with_si = "{} & {a} & {b} & {c} \\\\"
without_si = " & a & b & c \\\\"
expected = dedent(
f"""\
\\begin{{longtable}}{{l{"SS" if siunitx else "rr"}l}}
{exp} \\\\
{with_si if siunitx else without_si}
\\endfirsthead
{exp} \\\\
{with_si if siunitx else without_si}
\\endhead
"""
)
result = df.style.to_latex(
environment="longtable", sparse_columns=sparse, siunitx=siunitx
)
assert expected in result
@pytest.mark.parametrize(
"caption, cap_exp",
[
("full", ("{full}", "")),
(("full", "short"), ("{full}", "[short]")),
],
)
@pytest.mark.parametrize("label, lab_exp", [(None, ""), ("tab:A", " \\label{tab:A}")])
def test_longtable_caption_label(styler, caption, cap_exp, label, lab_exp):
cap_exp1 = f"\\caption{cap_exp[1]}{cap_exp[0]}"
cap_exp2 = f"\\caption[]{cap_exp[0]}"
expected = dedent(
f"""\
{cap_exp1}{lab_exp} \\\\
& A & B & C \\\\
\\endfirsthead
{cap_exp2} \\\\
"""
)
assert expected in styler.to_latex(
environment="longtable", caption=caption, label=label
)
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize(
"columns, siunitx",
[
(True, True),
(True, False),
(False, False),
],
)
def test_apply_map_header_render_mi(df_ext, index, columns, siunitx):
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
styler = df_ext.style
func = lambda v: "bfseries: --rwrap" if "A" in v or "Z" in v or "c" in v else None
if index:
styler.applymap_index(func, axis="index")
if columns:
styler.applymap_index(func, axis="columns")
result = styler.to_latex(siunitx=siunitx)
expected_index = dedent(
"""\
\\multirow[c]{2}{*}{\\bfseries{A}} & a & 0 & -0.610000 & ab \\\\
\\bfseries{} & b & 1 & -1.220000 & cd \\\\
B & \\bfseries{c} & 2 & -2.220000 & de \\\\
"""
)
assert (expected_index in result) is index
exp_cols_si = dedent(
"""\
{} & {} & \\multicolumn{2}{r}{\\bfseries{Z}} & {Y} \\\\
{} & {} & {a} & {b} & {\\bfseries{c}} \\\\
"""
)
exp_cols_no_si = """\
& & \\multicolumn{2}{r}{\\bfseries{Z}} & Y \\\\
& & a & b & \\bfseries{c} \\\\
"""
assert ((exp_cols_si if siunitx else exp_cols_no_si) in result) is columns
def test_repr_option(styler):
assert "<style" in styler._repr_html_()[:6]
assert styler._repr_latex_() is None
with option_context("styler.render.repr", "latex"):
assert "\\begin{tabular}" in styler._repr_latex_()[:15]
assert styler._repr_html_() is None
@pytest.mark.parametrize("option", ["hrules"])
def test_bool_options(styler, option):
    with option_context(f"styler.latex.{option}", False):
import pandas as pd
import fasttext
import time
import numpy as np
import spacy
import sys
fmodel = fasttext.load_model('/mnt/dhr/CreateChallenge_ICC_0821/lid.176.bin')
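# NOTE (assumption, not in the original excerpt): the functions below call a spaCy
# pipeline named `nlp`, which must be loaded elsewhere in this script, e.g.
#   nlp = spacy.load("en_core_web_sm")   # model name is a guess, not from the original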
def delist_lang(lst):
lang_lst=[]
for i,lang in enumerate(lst):
if not lang:
lang_lst.append(None)
else:
lang_lst.append(lang[0])
return lang_lst
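# Illustrative sketch (not from the original): fastText predict() returns a tuple of
# labels per input; delist_lang keeps only the top label, or None when the tuple is
# empty, e.g. delist_lang([('__label__en',), ()]) -> ['__label__en', None]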
def lang_tagger(parsed_sent):
labels,confs=fmodel.predict(parsed_sent,k=-1,threshold=0.1)
lang_list=delist_lang(labels)
significance_list=significance(confs)
assert len(lang_list)==len(significance_list)
return lang_list,significance_list
def significance(lst):
significance_list=[]
for l in lst:
if len(l)>1:
significance_list.append(abs(l[0]-l[1])/np.mean(l[0]+l[1])>0.1)
#print(f'{conf[0]} {conf[1]} {abs(conf[0]-conf[1])/np.mean(conf[0]+conf[1])>0.1}')
else:
significance_list.append(True)
return significance_list
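# Illustrative usage sketch (inputs are assumptions, not from the original):
#   labels, ok = lang_tagger(["This is English text.", "Ceci est une phrase en francais."])
# labels holds the top fastText language label per sentence (e.g. '__label__en'),
# and ok[i] is True when the top confidence clearly beats the runner-up
# (the relative-gap > 0.1 heuristic implemented in significance() above).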
def post_to_five_grams(post):
parsed_sent = nlp(post, disable=["parser", "ner"])
token_pos = []
for token in parsed_sent:
token_pos.append(token.text+ "_" + token.pos_)
if len(token_pos) < 4:
return None
# generate consecutive lists of 5 tokens
five_grams = []
for i in range(0, len(token_pos)-4, 1):
five_grams.append(" ".join(token_pos[i:i+5]))
return five_grams
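# Illustrative sketch (not from the original): each element of the returned list is a
# window of five consecutive "token_POS" items, e.g.
# "the_DET cat_NOUN sat_VERB on_ADP the_DET"; posts with fewer than 4 tokens return None.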
def ner_lemma_reducer(sent):
"""
parses sentence with spacy
:param sent:
:return: list of NER entities, lemmas, POS tags, whether each token is part of a compound
"""
lemma = []
pos = []
is_comp = False
ner_token = []
# could limit here which components of spacy are run?
parsed_sent = nlp(sent)
for token in parsed_sent:
lemma.append(token.lemma_)
pos.append(token.pos_)
if token.ent_type_ == "":
to_add = "NONNER"
else:
to_add = token.ent_type_
ner_token.append(to_add)
if token.dep_ == "compound":
is_comp = True
lemma_sent = ' '.join(lemma)
pos_sent = ' '.join(pos)
ner_token_sent = ' '.join(ner_token)
ner_length = 0
if parsed_sent.ents:
for ent in parsed_sent.ents:
ner_length += ent.end_char - ent.start_char
return ner_token_sent, ner_length, lemma_sent, pos_sent, is_comp, len(lemma)
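# Illustrative usage sketch (input and tags are assumptions; actual output depends on the model):
#   ner, ner_len, lemmas, pos, has_comp, n = ner_lemma_reducer("Apple hired Tim Cook")
# ner is one space-joined tag per token (e.g. "ORG NONNER PERSON PERSON"), ner_len counts
# the characters covered by entities, and has_comp flags any 'compound' dependency.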
fname = sys.argv[1]
type = sys.argv[2]
CHUNKSIZE = 500_000
dfs = pd.read_json(fname, lines=True, chunksize=CHUNKSIZE)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.rcParams["svg.hashsalt"]=0
def mkdirs(pre_path,parm_name):
try:
os.makedirs("../figures/"+pre_path+parm_name)
except:
pass
try:
os.makedirs("../analysed_data/"+pre_path+parm_name)
except:
pass
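# Illustrative sketch (argument values are assumptions): mkdirs('run1/', 'deathrate')
# creates ../figures/run1/deathrate and ../analysed_data/run1/deathrate if missing.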
def timeseries(pre_path,parm_name,parm_array,parm_format='{:.2E}',post_path='',plot_Tpos=True,plot_Tpro=True,plot_Tneg=True,plot_o2=True,plot_test=True,plot_tot=False,custom_title=None,save=True):
fig,ax=plt.subplots(len(parm_array),2,sharex=True,figsize=(10,3*len(parm_array)))
i=0
for parm in parm_array:
if isinstance(parm,(list,pd.core.series.Series,np.ndarray)): #If the parameter explored is multidimensional
string=parm_format.format(parm[0])
for pp in parm[1:]:
string+='-'+parm_format.format(pp)
else:
string=parm_format.format(parm)
#print('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv')
df=pd.read_csv('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv')
## Plotting Resources
if plot_o2:
ax[i,1].plot(df.t/24/60,df.o2,color="tab:cyan",label='o2')
if plot_test:
ax[i,1].plot(df.t/24/60,df.test,color="tab:orange",label='test')
ax[i,1].set_ylabel("Resource (proportion)")
ax[i,1].legend()
## Plotting Cell Number
if plot_Tpos:
ax[i,0].plot(df.t/24/60,df.Tpos,color="tab:green",label='T+')
if plot_Tpro:
ax[i,0].plot(df.t/24/60,df.Tpro,color="tab:blue",label='Tp')
if plot_Tneg:
ax[i,0].plot(df.t/24/60,df.Tneg,color="tab:red",label='T-')
if plot_tot:
ax[i,0].plot(df.t/24/60,df.Tpos+df.Tpro+df.Tneg,color="tab:grey",label='Total')
ax[i,0].set_ylabel("No of Cells")
ax[i,0].legend()
if custom_title==None:
ax[i,0].set_title(parm_name+'='+string)
else:
ax[i,0].set_title(custom_title[i])
i+=1
## Add Xaxis label for the last row only
ax[i-1,0].set_xlabel('Time (days)')
ax[i-1,1].set_xlabel('Time (days)')
fig.tight_layout()
if save:
fig.savefig('../figures/'+pre_path+parm_name+'/'+post_path+'timeseries.svg')
fig.clf()
plt.close(fig)
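# Illustrative usage sketch (parameter values are assumptions, not from the original):
#   timeseries(pre_path='run1/', parm_name='deathrate', parm_array=[0.01, 0.05, 0.10])
# reads ../raw_output/run1/deathrate/<formatted value>.csv for each entry and saves
# ../figures/run1/deathrate/timeseries.svg with one row of (cells, resources) panels per value.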
def timeseries_split(no_fig,sub_arr_len,pre_path,parm_name,parm_array,parm_format='{:.2E}',post_path='',plot_Tpos=True,plot_Tpro=True,plot_Tneg=True,plot_o2=True,plot_test=True,save=True):
if (sub_arr_len*no_fig!=len(parm_array)):
print("Wrong Array Length")
return
for i in range(no_fig):
timeseries(pre_path=pre_path,parm_name=parm_name,parm_array=parm_array[i*sub_arr_len:(i+1)*sub_arr_len],parm_format=parm_format,post_path=post_path)
os.rename('../figures/'+pre_path+parm_name+'/'+post_path+'timeseries.svg','../figures/'+pre_path+parm_name+'/'+post_path+'timeseries-'+str(i)+'.svg')
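# Illustrative sketch (assumed values): timeseries_split(2, 3, 'run1/', 'deathrate', parms)
# requires len(parms) == 2*3 and writes timeseries-0.svg and timeseries-1.svg, three rows each.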
def eq_values(pre_path,parm_name,parm_array,parm_format='{:.2E}',post_path='',save=True,parm_name_array=None,ttp=False,limit=None):
lis=[]
eq_col=['o2_eq','test_eq','Tpos_eq','Tpro_eq','Tneg_eq']
ttp_col=['TTE_Tpro','TTP_{}'.format(limit)]
if isinstance(parm_array[0],(list,pd.core.series.Series,np.ndarray)): #If the parameter explored is multidimensional
for parm in parm_array:
string=parm_format.format(parm[0])
for pp in parm[1:]:
string+='-'+parm_format.format(pp)
#print('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv')
            df = pd.read_csv('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv')
from collections import OrderedDict
import pandas as pd
pd.set_option('display.expand_frame_repr', False)
import numpy as np
# ******************************************
# helpers
# ******************************************
def _set_values_series(dfs):
    return set(dfs[~pd.isnull(dfs)])
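# Illustrative sketch (not from the original):
#   _set_values_series(pd.Series([1, 2, None, 2])) -> {1.0, 2.0}   # NaNs dropped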
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
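# Note (not from the original source): every shop scraper below follows the same pattern --
# page through the listing with headless Chrome, collect title / page_link / page_id /
# pic_link / ori_price / sale_price into a DataFrame, then call save() and upload(),
# which are assumed to be defined elsewhere in this script.
# Example of the helper above: stripID("https://shop.example/p?SaleID=123", "SaleID=") -> "123"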
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Azoom():
shop_id = 20
name = 'azoom'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 24):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.strip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 24):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div/div" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 24):
p += 1
continue
i += 1
if(i == 24):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Roxy():
shop_id = 21
name = 'roxy'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).get_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replace('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Shaxi():
shop_id = 22
name = 'shaxi'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cici():
shop_id = 23
name = 'cici'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Amesoeur():
shop_id = 25
name = 'amesour'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Singular():
shop_id = 27
name = 'singular'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>1ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>1ca3'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Folie():
shop_id = 28
name = 'folie'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.folief.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Corban():
shop_id = 29
name = 'corban'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.corban.com.tw/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS"
try:
chrome.get(url)
except:
break
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Gmorning():
shop_id = 30
name = 'gmorning'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.gmorning.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def July():
shop_id = 31
name = 'july'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.july2017.co/products?page=" + str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Per():
shop_id = 32
name = 'per'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.perdot.com.tw/categories/all?page=" + str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cereal():
shop_id = 33
name = 'cereal'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cerealoutfit.com/new/page/" + str(p) + "/"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
try:
chrome.find_element_by_xpath(
"//button[@class='mfp-close']").click()
except:
pass
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/h3/a" % (i,)).text
if(title == ""):
i += 1
if(i == 25):
p += 1
continue
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@data-loop='%i']" % (i,)).get_attribute('126-id')
pic_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//ins//bdi" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//del//bdi" % (i,)).text
ori_price = ori_price.rstrip(' NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[2]//span[@class='woocommerce-Price-amount amount']" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Jcjc():
shop_id = 35
name = 'jcjc'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.jcjc-dailywear.com/collections/in-stock?limit=24&page=" + \
str(p) + "&sort=featured"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a[1][@href]" % (i,)).get_attribute('href')
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/span/a/img" % (i,)).get_attribute('src')
page_id = pic_link[pic_link.find("i/")+2:pic_link.find(".j")]
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/s/span" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Ccshop():
shop_id = 36
name = 'ccshop'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.ccjshop.com/products?page=" + str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Iris():
shop_id = 37
name = 'iris'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.irisgarden.com.tw/products?page=" + str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='boxify-item product-item ng-isolate-scope'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nook():
shop_id = 39
name = 'nook'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.nooknook.me/products?page=" + str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Greenpea():
shop_id = 40
name = 'greenpea'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.greenpea-tw.com/products?page=" + str(p)
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Queen():
shop_id = 42
name = 'queen'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.queenshop.com.tw/zh-TW/QueenShop/ProductList?item1=01&item2=all&Page=" + \
str(p) + "&View=4"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "SaleID=")
pic_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/img[1]" % (i,)).get_attribute('data-src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cozyfee():
shop_id = 48
name = 'cozyfee'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary store for the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates data from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cozyfee.com/product.php?page=" + \
str(p) + "&cid=55#prod_list"
        # if the page number exceeds the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div[1]/a/img[1]" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[3]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Reishop():
shop_id = 49
name = 'reishop'
    options = Options()  # launch in headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.reishop.com.tw/pdlist2.asp?item1=all&item2=&item3=&keyword=&ob=A&pagex=&pageno=" + \
str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 31):
try:
title = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("yano=YA")
page_id = page_id.replace("&color=", "")
pic_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span/img[1]" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 31):
p += 1
continue
i += 1
if(i == 31):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Yourz():
shop_id = 50
name = 'yourz'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.yourz.com.tw/product/category/34/1/" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 13):
try:
title = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/detail/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/font" % (i,)).text
sale_price = sale_price.replace('VIP價:NT$ ', '')
sale_price = sale_price.rstrip('元')
ori_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/br" % (i,)).text
ori_price = ori_price.replace('NT$ ', '')
ori_price = ori_price.rstrip('元')
except:
i += 1
if(i == 13):
p += 1
continue
i += 1
if(i == 13):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Seoulmate():
shop_id = 54
name = 'seoulmate'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.seoulmate.com.tw/catalog.php?m=115&s=249&t=0&sort=&page=" + \
str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 33):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/p[1]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul/li[%i]/p[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=115&s=249&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//ul/li[%i]/a/img[1]" % (i,)).get_attribute('src')
if(pic_link == ""):
i += 1
if(i == 33):
p += 1
continue
except:
i += 1
if(i == 33):
p += 1
continue
try:
ori_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]/del" % (i,)).text
ori_price = ori_price.strip('NT.')
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
sale_price = sale_price.strip('NT.')
sale_price = sale_price.strip('NT.')
locate = sale_price.find("NT.")
sale_price = sale_price[locate+3:len(sale_price)]
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 33):
p += 1
continue
i += 1
if(i == 33):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Sweesa():
shop_id = 55
name = 'sweesa'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.sweesa.com/Shop/itemList.aspx?&m=20&o=5&sa=1&smfp=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 45):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&m=20", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('TWD.')
ori_price = ""
except:
i += 1
if(i == 45):
p += 1
continue
i += 1
if(i == 45):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pazzo():
shop_id = 56
name = 'pazzo'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pazzo.com.tw/recent?P=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("c=")
pic_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div[@class='item__images']/a/picture/img[@class='img-fluid']" % (i,)).get_attribute('src')
except:
i += 1
if(i == 41):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Meierq():
shop_id = 57
name = 'meierq'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
    page = 0
    p = 1  # not used for paging here (the range() loop drives pages) but referenced by the i == 41 checks below
prefix_urls = [
"https://www.meierq.com/zh-tw/category/bottomclothing?P=",
"https://www.meierq.com/zh-tw/category/jewelry?P=",
"https://www.meierq.com/zh-tw/category/outerclothing?P=",
"https://www.meierq.com/zh-tw/category/accessories?P=",
]
for prefix in prefix_urls:
page += 1
for i in range(1, page_Max):
url = f"{prefix}{i}"
try:
print(url)
chrome.get(url)
chrome.find_element_by_xpath("//div[@class='items__image']")
except:
print("find_element_by_xpath_break", page)
if(page == 4):
chrome.quit()
print("break")
break
break
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div/p/a" % (i,)).text
except:
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div/p/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "n/")
page_id = page_id[:page_id.find("?c")]
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div/img" % (i,)).get_attribute('src')
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Harper():
shop_id = 58
name = 'harper'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
while True:
url = "https://www.harper.com.tw/Shop/itemList.aspx?&m=13&smfp=" + \
str(p)
if(p > 20):
chrome.quit()
break
try:
chrome.get(url)
except:
chrome.quit()
break
i = 1
while(i < 80):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
p += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
page_id = stripID(page_link, "cno=")
page_id = page_id.replace("&m=13", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 79):
p += 1
continue
i += 1
if(i == 79):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Lurehsu():
shop_id = 59
name = 'lurehsu'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.lurehsu.com/zh-TW/lure/productList?item1=00&item2=16&page=" + \
str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 28):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("SaleID=")
page_id = page_id[:page_id.find("&Color")]
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 28):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NTD.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p/span[1]" % (i,)).text
ori_price = ori_price.strip('NTD.')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p" % (i,)).text
sale_price = sale_price.strip('NTD.')
ori_price = ""
except:
i += 1
if(i == 28):
p += 1
continue
i += 1
if(i == 28):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pufii():
shop_id = 61
name = 'pufii'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pufii.com.tw/Shop/itemList.aspx?&m=6&smfp=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=P", "")
page_id = page_id.replace("&m=6", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[2]" % (i,)).text
sale_price = sale_price.strip('活動價NT')
ori_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT')
except:
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT')
ori_price = ""
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Mouggan():
shop_id = 62
name = 'mouggan'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.mouggan.com/zh-tw/category/ALL-ITEM?P=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
try:
chrome.find_element_by_xpath(
"//a[@class='close p-0']/i[@class='icon-popup-close']").click()
except:
pass
i = 1
while(i < 19):
try:
title = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "c=")
pic_link = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[1]/div/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 19):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 19):
p += 1
continue
i += 1
if(i == 19):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Mercci():
shop_id = 64
name = 'mercci'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary frame for the current page; merged into dfAll on each page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
    freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
    >>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
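    # A small sketch of the freq-based form from the class docstring above
    # (toy data, illustrative names): group a 'date' column into 60-second bins.
    #
    # >>> df = DataFrame({'date': date_range('2014-01-01', periods=4, freq='30s'),
    # ...                 'x': [1, 2, 3, 4]})
    # >>> df.groupby(Grouper(key='date', freq='60s'))['x'].sum()
    # -> two 60-second bins, with x summed within each bin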
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
        given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
            # possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
        # need to set up the selection,
        # as it is not passed directly but sits in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise ValueError if
                    # we don't have this method, to indicate to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
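    # Sketch of the apply() cases described in the docstring above, on a toy
    # frame (names are illustrative):
    #
    # >>> df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]})
    # case 1, f(group) -> Series (per-group column means):
    # >>> df.groupby('A').apply(lambda g: g.mean())
    # case 2, f(group) -> same-indexed DataFrame (a demeaning transform):
    # >>> df.groupby('A')[['B']].apply(lambda g: g - g.mean())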
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
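    # A minimal usage sketch of the dict form described in _agg_doc (see the
    # agg/aggregate docstring), on a toy frame with illustrative column names:
    #
    # >>> df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3], 'C': [4., 5., 6.]})
    # >>> df.groupby('A').agg({'B': np.sum, 'C': np.mean})
    # -> one row per group label ('x', 'y'), with B summed and C averaged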
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will not show nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
        >>> df.groupby('A').tail(1)
           A  B
        1  1  4
        2  5  6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
        we may have roundtripped through object in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
        # iterate through "columns", excluding exclusions, to populate the output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
                f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
            raise NotImplementedError("function is not implemented for this "
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
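# Illustrative sketch, not part of the library (the values and bin edges below
# are assumed purely for the example): with values = np.array([1, 2, 3, 4, 5])
# and binner = np.array([0, 3, 6]), generate_bins_generic(values, binner, 'left')
# scans once and returns np.array([2, 5]), i.e. values[0:2] land in the first
# bin and values[2:5] in the second.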
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
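# Sketch with assumed data (not library code): for bins = [2, 5], two bin
# labels and an object of length 5, get_iterator yields
# (binlabels[0], data[0:2]) and then (binlabels[1], data[2:5]); if the data
# were longer than the last edge, the remainder would be yielded again under
# the last label.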
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passed-in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamp-like values
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
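# Sketch of what a Grouping computes, using assumed toy data: for
# Grouping(Index(range(4)), grouper=np.array(['x', 'y', 'x', 'y'])),
# _make_labels factorizes the grouper so that .labels == array([0, 1, 0, 1]),
# .group_index == Index(['x', 'y']) and .ngroups == 2.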
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed-in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
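# Sketch with an assumed frame (not library code): for
# df = DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]}), a call such as
# df.groupby('key') ends up in _get_grouper(df, key='key'), which wraps the
# 'key' column in a single Grouping, records 'key' in the exclusions and
# returns (BaseGrouper(df.index, [grouping]), ['key'], df).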
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
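# Sketch with assumed inputs: _convert_grouper(Index(['a', 'b']), {'a': 1, 'b': 2})
# returns the dict's .get method; a Series whose index equals the axis is
# reduced to its .values; any other callable or scalar is passed through
# unchanged.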
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Series but in some cases DataFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if isinstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = next(v for v in values if v is not None)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as it's faster than concat
# and if we have mi-columns
if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
result = concat(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is a list of arrays of unequal lengths; fall
# through to the outer else clause
return | Series(values, index=key_index) | pandas.core.series.Series |
#/*##########################################################################
# Copyright (C) 2020-2021 The University of Lorraine - France
#
# This file is part of the PyRecon toolkit developed at the GeoRessources
# Laboratory of the University of Lorraine, France.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
# -*- coding: utf-8 -*-
from pkgs.SpectraPreProcessing import *
import pandas as pd
import json
import numpy as np
'''
Once the object is built : Obj = Raman(AdressFolder, Name)
Where :
> AdressFolder : The path to the folder containing the files.
> Name : the file name without the extension (ex : MesureR_2019 07 16_13h 49mn 38s L2C53.esp --> Name = MesureR_2019 07 16_13h 49mn 38s L2C53).
We can :
> Read headers from one file with the function "Read_Header" : df = Obj.Read_Header()
> Read headers from several files into one DataFrame with the function "Read_Headers" : df = Obj.Read_Headers()
  We can leave the argument "Name" empty as follows: Name = " "
> Read the spectrum from one file with the function "Read_spectrum" : df = Obj.Read_spectrum()
> Read the spectra from several files into one DataFrame with the function "Read_spectrums" : df = Obj.Read_spectrums()
  We can leave the argument "Name" empty as follows: Name = " "
> Save as .CSV : after having collected the data in "df", we can save it under a given name
  (name = "DataFrame") with the following command : Obj.Save_as_csv(df, name = "DataFrame")
NOTE :
* The folder must contain only the files.
* The functions ending in "_" (example : Obj.Read_spectrum_()) are used for extracting the integer portion of the NumberWaves.
* Please move the .csv file generated by "Obj.Save_as_csv" elsewhere and delete it from the current folder.
'''
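# Minimal usage sketch following the docstring above (the folder path and the
# sample file name are placeholders, not files shipped with the toolkit):
# Obj = Raman("path/to/folder", "MesureR_2019 07 16_13h 49mn 38s L2C53")
# headers = Obj.Read_Headers() # headers of every file in the folder
# spectra = Obj.Read_spectrums() # spectra of every file in the folder
# Obj.Save_as_csv(spectra, name="DataFrame")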
class Raman(SpectraPreProcessing):
def __init__(self, AdressFolder, Name):
SpectraPreProcessing.__init__(self,AdressFolder)
self.Name = Name
def NameSpectrumColumns(self):
'''
Returns a list of tuples containing the names of each column
Note : you can change the following argument "ColomnsName" for naming the columns.
'''
ColomnsName = ["NumberWaves", "Intensity"]
Name = [self.NameColumn(SampleName = self.Name, ColomnName = [w]) for w in ColomnsName]
return Name
def Header_to_Dict(self,NumberLine):
'''
Returns the header lines as a list of dictionaries
'''
inputfile = open(self.FilePath(self.Name), 'r')
Liste = list()
for i in range(NumberLine):
inputstr=inputfile.readline()
inputspl=re.split("=",inputstr)
Liste.append(json.loads(inputspl[1]))
inputfile.close()
return Liste
def Read_Header(self):
'''
Converts the header lines in the file to a data frame
Note : In this case the header lines are recognized by the hashtags "#" : #exp_cfg and #proc_cfg
'''
global df
try :
List = [self.Dict_to_Df(Element) for Element in self.Header_to_Dict(self.Count_Hachtags(self.Name))]
# Add columns for the name : the name is also the name of the file "name.esp"
df = self.Add_columns(List[0], "Sample", self.Name)
# Join the remaining data frames into one data frame:
for i in range(len(List)-1):
df = self.Join_df(df , List[i+1])
except IndexError:
print("Please check that your file contains headers")
df = pd.DataFrame({'None' : [np.nan]})
return df
def Read_Headers(self):
'''
Returns a DataFrame containing the header lines of all files existing in the folder
Note : this function concatenates all header lines from one or more files in the folder into one dataframe using Header_to_Df
'''
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
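# For reference (derived from the levels/labels above, not asserted anywhere):
# the fixture index is equivalent to MultiIndex.from_tuples(
# [('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'),
# ('qux', 'one'), ('qux', 'two')], names=['first', 'second']).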
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels don't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names don't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out')
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
"""
Function and classes used to identify barcodes
"""
from typing import *
import pandas as pd
import numpy as np
import pickle
import logging
from sklearn.neighbors import NearestNeighbors
# from pynndescent import NNDescent
from pathlib import Path
from itertools import groupby
from pysmFISH.logger_utils import selected_logger
from pysmFISH.data_models import Output_models
from pysmFISH.errors import Registration_errors
class simplify_barcodes_reference():
"""Utility Class use to convert excels files with codebook info
in smaller size pandas dataframe/parquet files to pass to dask
workers during the processing. This utility function must be
run before running the experiment analysis. The pipeline
require the output of this function.
"""
def __init__(self, barcode_fpath: str):
"""Class initialization
Args:
barcode_fpath (str): Path to the xlsx file with the codebook
"""
self.barcode_fpath = Path(barcode_fpath)
self.barcode_fname = self.barcode_fpath.stem
@staticmethod
def format_codeword(codeword: str):
"""[summary]
Args:
codeword (str): codeword representing a gene
Returns:
byte: codeword converted in byte representation
"""
str_num = codeword.split('[')[-1].split(']')[0]
converted_codeword = np.array([int(el) for el in list(str_num)]).astype(np.int8)
converted_codeword = converted_codeword.tobytes()
return converted_codeword
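# A minimal sketch of the conversion performed above (the codeword string is a
# made-up example, not taken from a real codebook):
#
#   simplify_barcodes_reference.format_codeword('[0110]')
#   # -> b'\x00\x01\x01\x00'  (one int8 byte per bit of the codeword)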
def convert_codebook(self):
used_gene_codebook_df = pd.read_excel(self.barcode_fpath)
# used_gene_codebook_df = pd.read_parquet(self.barcode_fpath)
self.codebook_df = used_gene_codebook_df.loc[:,['Barcode','Gene']]
self.codebook_df.rename(columns = {'Barcode':'Code'}, inplace = True)
self.codebook_df.Code = self.codebook_df.Code.apply(lambda x: self.format_codeword(x))
self.codebook_df.to_parquet(self.barcode_fpath.parent / (self.barcode_fname + '.parquet'))
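# A minimal usage sketch of the class above; the xlsx path is hypothetical and the
# file is assumed to contain the 'Barcode' and 'Gene' columns read by convert_codebook:
#
#   converter = simplify_barcodes_reference('/path/to/codebook.xlsx')
#   converter.convert_codebook()
#   # writes /path/to/codebook.parquet with 'Code' (bytes) and 'Gene' columns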
def dots_hoods(coords: np.ndarray,pxl: int)->np.ndarray:
"""Function that calculate the coords of the peaks searching
neighborhood for identifying the barcodes.
Args:
coords (np.ndarray): coords of the identified peaks
pxl (int): size of the neighborhood in pixel
Returns:
np.ndarray: coords that define the neighborhood (r_tl,r_br,c_tl,c_tr)
"""
r_tl = coords[:,0]-pxl
r_br = coords[:,0]+pxl
c_tl = coords[:,1]-pxl
c_tr = coords[:,1]+pxl
r_tl = r_tl[:,np.newaxis]
r_br = r_br[:,np.newaxis]
c_tl = c_tl[:,np.newaxis]
c_tr = c_tr[:,np.newaxis]
chunks_coords = np.hstack((r_tl,r_br,c_tl,c_tr))
chunks_coords = chunks_coords.astype(int)
return chunks_coords
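# A small worked example of the neighborhood coords (values computed from the
# function above; the peak coordinates are arbitrary):
#
#   peaks = np.array([[10, 20], [30, 40]])
#   dots_hoods(peaks, 2)
#   # -> array([[ 8, 12, 18, 22],
#   #           [28, 32, 38, 42]])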
def extract_dots_images(barcoded_df: pd.DataFrame,registered_img_stack: np.ndarray,
experiment_fpath: str, metadata: dict):
"""Function used to extract the images corresponding to a barcode
after running the decoding identification. It can save the images
but to avoid increasing too much the space occupied by a processed
experiment an array with the maximum intensity value of the pxl in
each round is calculated and saved
Args:
barcoded_df (pd.DataFrame): Dataframe with decoded barcodes
for a specific field of view.
registered_img_stack (np.ndarray): Preprocessed image of a single field of view
the imaging round correspond to the z-stack position
experiment_fpath (str): Path to the folder of the experiment to process
metadata (dict): Overall experiment info
"""
round_intensity_labels = ['bit_' + str(el) +'_intensity' for el in np.arange(1,int(metadata['total_rounds'])+1)]
if isinstance(registered_img_stack, np.ndarray) and (barcoded_df.shape[0] >1):
experiment_fpath = Path(experiment_fpath)
barcodes_names = barcoded_df['barcode_reference_dot_id'].values
coords = barcoded_df.loc[:, ['r_px_registered', 'c_px_registered']].to_numpy()
barcodes_extraction_resolution = barcoded_df['barcodes_extraction_resolution'].values[0]
chunks_coords = dots_hoods(coords,barcodes_extraction_resolution)
chunks_coords[chunks_coords<0]=0
chunks_coords[chunks_coords>registered_img_stack.shape[1]]= registered_img_stack.shape[1]
for idx in np.arange(chunks_coords.shape[0]):
selected_region = registered_img_stack[:,chunks_coords[idx,0]:chunks_coords[idx,1]+1,chunks_coords[idx,2]:chunks_coords[idx,3]+1]
if selected_region.size >0:
max_array = selected_region.max(axis=(1,2))
barcoded_df.loc[barcoded_df.dot_id == barcodes_names[idx],round_intensity_labels] = max_array
# for channel in channels:
# all_regions[channel] = {}
# all_max[channel] = {}
# img_stack = registered_img_stack[channel]
# trimmed_df_channel = trimmed_df.loc[trimmed_df.channel == channel]
# if trimmed_df_channel.shape[0] >0:
# barcodes_names = trimmed_df_channel['barcode_reference_dot_id'].values
# coords = trimmed_df_channel.loc[:, ['r_px_registered', 'c_px_registered']].to_numpy()
# barcodes_extraction_resolution = trimmed_df_channel['barcodes_extraction_resolution'].values[0]
# chunks_coords = dots_hoods(coords,barcodes_extraction_resolution)
# chunks_coords[chunks_coords<0]=0
# chunks_coords[chunks_coords>img_stack.shape[1]]= img_stack.shape[1]
# for idx in np.arange(chunks_coords.shape[0]):
# selected_region = img_stack[:,chunks_coords[idx,0]:chunks_coords[idx,1]+1,chunks_coords[idx,2]:chunks_coords[idx,3]+1]
# if selected_region.size >0:
# max_array = selected_region.max(axis=(1,2))
# # all_regions[channel][barcodes_names[idx]]= selected_region
# all_max[channel][barcodes_names[idx]]= max_array
# barcoded_df.loc[barcoded_df.dot_id == barcodes_names[idx],round_intensity_labels] = max_array
# fpath = experiment_fpath / 'tmp' / 'combined_rounds_images' / (experiment_name + '_' + channel + '_img_dict_fov_' + str(fov) + '.pkl')
# pickle.dump(all_regions,open(fpath,'wb'))
# fpath = experiment_fpath / 'results' / (experiment_name + '_barcodes_max_array_dict_fov_' + str(fov) + '.pkl')
# pickle.dump(all_max,open(fpath,'wb'))
else:
barcoded_df.loc[:,round_intensity_labels] = np.nan
return barcoded_df
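# Illustrative call (editorial sketch; the argument objects and path are hypothetical):
#   barcoded_df = extract_dots_images(barcoded_df, registered_img_stack,
#                                     '/path/to/experiment', metadata)
# After the call each decoded dot carries bit_1_intensity ... bit_N_intensity columns
# holding the per-round maximum pixel value inside its extraction neighborhood.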
def identify_flipped_bits(codebook: pd.DataFrame, gene: str,
raw_barcode: ByteString)-> Tuple[ByteString, ByteString]:
"""Utility function used to identify the position of the bits that are
flipped after the nearest neighbors and the definition of the
acceptable hamming distance for a single dot.
Args:
codebook (pd.DataFrame): Codebook used for the decoding
gene (str): Name of the gene identified
raw_barcode (ByteString): identifide barcode from the images
Returns:
Tuple[ByteString, ByteString]: (flipped_position, flipping_direction)
"""
gene_barcode_str =codebook.loc[codebook.Gene == gene, 'Code'].values[0]
gene_barcode = np.frombuffer(gene_barcode_str, np.int8)
raw_barcode = np.frombuffer(raw_barcode, np.int8)
flipped_positions = np.where(raw_barcode != gene_barcode)[0].astype(np.int8)
flipping_directions = (gene_barcode[flipped_positions] - raw_barcode[flipped_positions]).astype(np.int8)
# flipped_positions = flipped_positions.tobytes()
# flipping_directions = flipping_directions.tobytes()
return flipped_positions,flipping_directions
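# Worked example (editorial, illustrative values): if the codebook entry for a gene is
# the bit pattern 1010 and the measured raw barcode is 1000, the function returns
# flipped positions [2] and flipping directions [1] (a 1 in the codebook was read as 0).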
def define_flip_direction(codebook_dict: dict,experiment_fpath: str,
output_df: pd.DataFrame):
"""Function used to determinethe the position of the bits that are
flipped after the nearest neighbors and the definition of the
acceptable hamming distance for fov.
Args:
codebook (dict): Codebooks used for the decoding
experiment_fpath (str): Path to the folder of the experiment to process
output_df (pd.DataFrame): Dataframe with the decoded results for
the specific fov.
"""
if output_df.shape[0] > 1:
correct_hamming_distance = 0
selected_hamming_distance = 3 / output_df.iloc[0].barcode_length
experiment_fpath = Path(experiment_fpath)
experiment_name = experiment_fpath.stem
channels = codebook_dict.keys()
all_evaluated = []
for channel in channels:
codebook = codebook_dict[channel]
fov = output_df.fov_num.values[0]
trimmed_df = output_df.loc[(output_df.dot_id == output_df.barcode_reference_dot_id) &
(output_df.channel == channel) &
(output_df['hamming_distance'] > correct_hamming_distance) &
(output_df['hamming_distance'] < selected_hamming_distance),
['barcode_reference_dot_id', 'decoded_genes', 'raw_barcodes','hamming_distance']]
trimmed_df = trimmed_df.dropna(subset=['decoded_genes'])
trimmed_df.loc[:,('flip_and_direction')] = trimmed_df.apply(lambda x: identify_flipped_bits(codebook,x.decoded_genes,x.raw_barcodes),axis=1)
trimmed_df['flip_position'] = trimmed_df['flip_and_direction'].apply(lambda x: x[0])
trimmed_df['flip_direction'] = trimmed_df['flip_and_direction'].apply(lambda x: x[1])
trimmed_df.drop(columns=['flip_and_direction'],inplace=True)
all_evaluated.append(trimmed_df)
        all_evaluated = pd.concat(all_evaluated, axis=0, ignore_index=True)  # pd.concat has no inplace argument
        fpath = experiment_fpath / 'results' / (experiment_name + '_' + channel + '_df_flip_direction_fov' + str(fov) + '.parquet')
        all_evaluated.to_parquet(fpath)
# return trimmed_df
def chunk_dfs(dataframes_list: list, chunk_size: int):
"""
Functions modified from
https://stackoverflow.com/questions/45217120/how-to-efficiently-join-merge-concatenate-large-data-frame-in-pandas
yields n dataframes at a time where n == chunksize
"""
dfs = []
for f in dataframes_list:
dfs.append(f)
if len(dfs) == chunk_size:
yield dfs
dfs = []
if dfs:
yield dfs
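# Illustrative usage (editorial sketch): grouping five dataframes two at a time
#   for group in chunk_dfs([df1, df2, df3, df4, df5], chunk_size=2):
#       ...   # yields [df1, df2], then [df3, df4], then [df5]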
def merge_with_concat(dfs: list)->pd.DataFrame:
"""Utility function used to merge dataframes
Args:
        dfs (list): List with the dataframes to merge
Returns:
pd.DataFrame: Merged dataframe
"""
# dfs = (df.set_index(col, drop=True) for df in dfs)
merged = pd.concat(dfs, axis=0, join='outer', copy=False)
return merged
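# NOTE (editorial sketch): extract_barcodes_NN_fast_multicolor below calls module-level
# convert_str_codebook / make_codebook_array, which in this excerpt only exist as
# staticmethods of the classes further down. Minimal module-level versions mirroring
# those staticmethods are sketched here so the function can run as written.
def convert_str_codebook(codebook_df: pd.DataFrame, column_name: str) -> pd.DataFrame:
    # Convert the byte-string codewords stored in the codebook into int8 arrays
    codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
    return codebook_df
def make_codebook_array(codebook_df: pd.DataFrame, column_name: str) -> np.ndarray:
    # Stack the codeword arrays into a 2D matrix usable by NearestNeighbors
    codebook_array = np.zeros((len(codebook_df[column_name]), codebook_df[column_name][0].shape[0]))
    for idx, el in enumerate(codebook_df[column_name]):
        codebook_array[idx, :] = el[np.newaxis, :]
    return codebook_array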
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def extract_barcodes_NN_fast_multicolor(registered_counts_df: pd.DataFrame, analysis_parameters: Dict,
codebook_df: pd.DataFrame, metadata:dict)-> Tuple[pd.DataFrame,pd.DataFrame]:
"""Function used to extract the barcodes from the registered
counts using nearest neighbour. if there is a problem with the registration the barcode assigned
will be 0*barcode_length
Args:
registered_counts_df (pd.Dataframe): Fov counts after registration
analysis_parameters (Dict): Parameters for data processing
codebook_df (pd.DataFrame): codebook used to deconvolve the barcode
Returns:
Tuple[pd.DataFrame,pd.DataFrame]: (barcoded_round, all_decoded_dots_df)
"""
logger = selected_logger()
barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
barcode_length = metadata['barcode_length']
registration_errors = Registration_errors()
stitching_channel = metadata['stitching_channel']
registered_counts_df.dropna(subset=['dot_id'],inplace=True)
# Starting level for selection of dots
dropping_counts = registered_counts_df.copy(deep=True)
all_decoded_dots_list = []
barcoded_round = []
if registered_counts_df['r_px_registered'].isnull().values.any():
all_decoded_dots_df = pd.DataFrame(columns = registered_counts_df.columns)
all_decoded_dots_df['decoded_genes'] = np.nan
all_decoded_dots_df['hamming_distance'] = np.nan
all_decoded_dots_df['number_positive_bits'] = np.nan
all_decoded_dots_df['barcode_reference_dot_id'] = np.nan
all_decoded_dots_df['raw_barcodes'] = np.nan
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
# Save barcoded_round and all_decoded_dots_df
return registered_counts_df, all_decoded_dots_df
else:
for ref_round_number in np.arange(1,barcode_length+1):
#ref_round_number = 1
reference_round_df = dropping_counts.loc[dropping_counts.round_num == ref_round_number,:]
# Step one (all dots not in round 1)
compare_df = dropping_counts.loc[dropping_counts.round_num!=ref_round_number,:]
if (not reference_round_df.empty):
if not compare_df.empty:
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_round_df[['r_px_registered','c_px_registered']])
dists, indices = nn.kneighbors(compare_df[['r_px_registered','c_px_registered']], return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_distances_below_resolution = np.where(dists <= barcodes_extraction_resolution)[0]
comp_idx = idx_distances_below_resolution
ref_idx = indices[comp_idx].flatten()
# Subset the dataframe according to the selected points
# The reference selected will have repeated points
comp_selected_df = compare_df.iloc[comp_idx]
ref_selected_df = reference_round_df.iloc[ref_idx]
                    # The size of ref_selected_df w/o duplicates may be smaller than reference_round_df if
                    # some of the dots in reference_round_df have no neighbours
# Test approach where we get rid of the single dots
comp_selected_df.loc[:,'barcode_reference_dot_id'] = ref_selected_df['dot_id'].values
ref_selected_df_no_duplicates = ref_selected_df.drop_duplicates()
ref_selected_df_no_duplicates.loc[:,'barcode_reference_dot_id'] = ref_selected_df_no_duplicates['dot_id'].values
# Collect singletons
                    # Remember that this method works only because there are no duplicates inside the dataframes
# https://stackoverflow.com/questions/48647534/python-pandas-find-difference-between-two-data-frames
if reference_round_df.shape[0] > ref_selected_df_no_duplicates.shape[0]:
singletons_df = pd.concat([reference_round_df,ref_selected_df_no_duplicates]).drop_duplicates(keep=False)
singletons_df.loc[:,'barcode_reference_dot_id'] = singletons_df['dot_id'].values
barcoded_round = pd.concat([comp_selected_df, ref_selected_df_no_duplicates,singletons_df], axis=0,ignore_index=False)
else:
barcoded_round = pd.concat([comp_selected_df, ref_selected_df_no_duplicates], axis=0,ignore_index=False)
# barcoded_round = pd.concat([comp_selected_df, ref_selected_df_no_duplicates,singletons_df], axis=0,ignore_index=False)
barcoded_round_grouped = barcoded_round.groupby('barcode_reference_dot_id')
compare_df = compare_df.drop(comp_selected_df.index)
dropping_counts = compare_df
else:
# Collecting singleton of last bit
reference_round_df.loc[:,'barcode_reference_dot_id'] = reference_round_df['dot_id'].values
barcoded_round_grouped = reference_round_df.groupby('barcode_reference_dot_id')
ref_selected_df_no_duplicates = reference_round_df
for brdi, grp in barcoded_round_grouped:
barcode = np.zeros([barcode_length],dtype=np.int8)
barcode[grp.round_num.values.astype(np.int8)-1] = 1
#hamming_dist, index_gene = nn_sklearn.kneighbors(barcode.reshape(1, -1), return_distance=True)
#gene= codebook_df.loc[index_gene.reshape(index_gene.shape[0]),'Gene'].tolist()
barcode = barcode.tostring()
if len(ref_selected_df_no_duplicates) != 0:
ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'raw_barcodes'] = barcode
#ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'decoded_gene_name'] = gene
#ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'hamming_distance'] = hamming_dist.flatten()[0]
#fish_counts.loc[grp.index,'barcode_reference_dot_id'] = brdi
#fish_counts.loc[grp.index,'raw_barcodes'] = barcode
#dists, index = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
all_decoded_dots_list.append(ref_selected_df_no_duplicates)
if all_decoded_dots_list:
all_decoded_dots_df = pd.concat(all_decoded_dots_list,ignore_index=False)
codebook_df = convert_str_codebook(codebook_df,'Code')
codebook_array = make_codebook_array(codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
all_barcodes = np.vstack(all_decoded_dots_df.raw_barcodes.map(lambda x: np.frombuffer(x, np.int8)).values)
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
all_decoded_dots_df.loc[:,'decoded_genes'] = genes
all_decoded_dots_df.loc[:,'hamming_distance'] = dists_arr
all_decoded_dots_df.loc[:,'number_positive_bits'] = all_barcodes.sum(axis=1)
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
else:
all_decoded_dots_df = pd.DataFrame(columns = registered_counts_df.columns)
all_decoded_dots_df['decoded_genes'] = np.nan
all_decoded_dots_df['hamming_distance'] = np.nan
all_decoded_dots_df['number_positive_bits'] = np.nan
all_decoded_dots_df['barcode_reference_dot_id'] = np.nan
all_decoded_dots_df['raw_barcodes'] = np.nan
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
# Save barcoded_round and all_decoded_dots_df
return barcoded_round, all_decoded_dots_df
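# Illustrative call (editorial sketch; the argument objects are produced upstream in the pipeline):
#   barcoded_round, decoded_df = extract_barcodes_NN_fast_multicolor(
#       registered_counts_df, analysis_parameters, codebook_df, metadata)
#   decoded_df[['decoded_genes', 'hamming_distance', 'number_positive_bits']].head()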
# TODO Remove all the functions below
######## -------------------------------------------------------------------
class extract_barcodes_NN():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
experiment_config: Dict
dictionary with the experimental data
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df,file_tags,status:str):
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = experiment_config['Barcode_length']
self.counts = counts
self.logger = selected_logger()
self.codebook_df = codebook_df
self.file_tags = file_tags
self.status = status
self.registration_errors = Registration_errors()
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
column_names = list(counts_df.columns.values)
        column_names.append('barcode_reference_dot_id')  # list.append works in place; assigning its None return would lose the names
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in round 1)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
# ref_idx = indices[idx_selected_coords_compare]
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
fov = self.file_tags['fov']
channel = self.file_tags['channel']
self.barcoded_fov_df = data_models.barcode_analysis_df
self.barcoded_fov_df.attrs = self.counts.attrs
if self.status == 'FAILED':
error = self.counts['min_number_matching_dots_registration'].values[0]
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':error,
'fov_num':int(fov),'dot_channel':channel,'round_num': round_num },ignore_index=True)
elif self.status == 'SUCCESS':
if (min(self.counts.loc[:,'min_number_matching_dots_registration']) < self.RegistrationMinMatchingBeads):
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':registration_errors.registration_below_extraction_resolution,
'fov_num':int(fov),'dot_channel':channel,'round_num': round_num},ignore_index=True)
self.status = 'FAILED'
else:
hd_2 = 2 / self.barcode_length
hd_3 = 3 / self.barcode_length
# barcode_length = len(self.counts['round_num'].unique())
rounds = np.arange(1,self.barcode_length+1)
self.codebook_df = self.convert_str_codebook(self.codebook_df,'Code')
codebook_array = self.make_codebook_array(self.codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
# remove points with np.NAN
# self.counts = self.counts.dropna()
for round_num in rounds:
compare_df, barcoded_df = self.barcode_nn(self.counts, round_num, self.barcodes_extraction_resolution)
self.barcoded_fov_df = self.barcoded_fov_df.append(barcoded_df, ignore_index=True)
self.counts = compare_df
self.counts['barcode_reference_dot_id'] = self.counts.dot_id
self.barcoded_fov_df = self.barcoded_fov_df.append(self.counts, ignore_index=True)
self.barcoded_fov_df['barcodes_extraction_resolution'] = self.barcodes_extraction_resolution
self.grpd = self.barcoded_fov_df.groupby('barcode_reference_dot_id')
# self.all_barcodes = {}
# for name, group in self.grpd:
# rounds_num = group.round_num.values
# dot_ids = group.dot_id.values
# rounds_num = rounds_num.astype(int)
# barcode = np.zeros([self.barcode_length],dtype=np.int8)
# barcode[(rounds_num-1)] += 1
# dists_arr, index_arr = nn_sklearn.kneighbors(barcode.reshape(1, -1), return_distance=True)
# gene=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()[0]
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'raw_barcodes'] = barcode.tostring()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'all_Hdistance_genes'] = gene
# if dists_arr[0][0] == 0:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'0Hdistance_genes'] = gene
# elif dists_arr[0][0] < hd_2:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below2Hdistance_genes'] = gene
# elif dists_arr[0][0] < hd_3:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below3Hdistance_genes'] = gene
barcode_reference_dot_id_list = []
num_unique_dots = np.unique(self.barcoded_fov_df.loc[:,'barcode_reference_dot_id']).shape[0]
                # Proceed only if there are dots in the df
if num_unique_dots > 0:
all_barcodes = np.zeros([num_unique_dots,self.barcode_length],dtype=np.int8)
for idx, (name, group) in enumerate(self.grpd):
barcode_reference_dot_id_list.append(name)
barcode = np.zeros([self.barcode_length],dtype=np.int8)
rounds_num = group.round_num.values
rounds_num = rounds_num.astype(int)
barcode[(rounds_num-1)] += 1
all_barcodes[idx,:] = barcode
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
for idx,name in enumerate(barcode_reference_dot_id_list):
barcode = all_barcodes[idx,:]
gene = genes[idx]
hd = dists_arr[idx][0]
cols = ['raw_barcodes','all_Hdistance_genes','number_positive_bits','hamming_distance'] # will add last column depending on hd
writing_data = [barcode.tostring(),gene,barcode.sum(),hd]
if hd == 0:
cols = cols + ['zeroHdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_2:
cols = cols + ['below2Hdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_3:
cols = cols + ['below3Hdistance_genes']
writing_data = writing_data + [gene]
self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,cols] = writing_data
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'raw_barcodes'] = barcode.tostring()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'all_Hdistance_genes'] = gene
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'number_positive_bits'] = barcode.sum()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'hamming_distance'] = hd
# if hd == 0:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'0Hdistance_genes'] = gene
# elif hd < hd_2:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below2Hdistance_genes'] = gene
# elif hd < hd_3:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below3Hdistance_genes'] = gene
fname = self.file_tags['experiment_fpath'] / 'tmp' / 'registered_counts' / (self.file_tags['experiment_name'] + '_' + self.file_tags['channel'] + '_decoded_fov_' + self.file_tags['fov'] + '.parquet')
self.barcoded_fov_df.to_parquet(fname,index=False)
class extract_barcodes_NN_test():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
experiment_config: Dict
dictionary with the experimental data
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, fov, channel, counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df,status:str):
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = experiment_config['Barcode_length']
self.fov = fov
self.channel = channel
self.counts = counts
self.logger = selected_logger()
self.codebook_df = codebook_df
self.status = status
self.registration_errors = Registration_errors()
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
column_names = list(counts_df.columns.values)
        column_names.append('barcode_reference_dot_id')  # append in place; the original assignment set column_names to None
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in round 1)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
# ref_idx = indices[idx_selected_coords_compare]
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
self.barcoded_fov_df = data_models.barcode_analysis_df
self.barcoded_fov_df.attrs = self.counts.attrs
if self.status == 'FAILED':
error = self.counts['min_number_matching_dots_registration'].values[0]
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':error,
'fov_num':int(self.fov),'dot_channel':self.channel,'round_num': round_num },ignore_index=True)
elif self.status == 'SUCCESS':
if (min(self.counts.loc[:,'min_number_matching_dots_registration']) < self.RegistrationMinMatchingBeads):
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':registration_errors.registration_below_extraction_resolution,
'fov_num':int(self.fov),'dot_channel':self.channel,'round_num': round_num},ignore_index=True)
self.status = 'FAILED'
else:
hd_2 = 2 / self.barcode_length
hd_3 = 3 / self.barcode_length
# barcode_length = len(self.counts['round_num'].unique())
rounds = np.arange(1,self.barcode_length+1)
self.codebook_df = self.convert_str_codebook(self.codebook_df,'Code')
codebook_array = self.make_codebook_array(self.codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
# remove points with np.NAN
# self.counts = self.counts.dropna()
for round_num in rounds:
compare_df, barcoded_df = self.barcode_nn(self.counts, round_num, self.barcodes_extraction_resolution)
self.barcoded_fov_df = self.barcoded_fov_df.append(barcoded_df, ignore_index=True)
self.counts = compare_df
self.counts['barcode_reference_dot_id'] = self.counts.dot_id
self.barcoded_fov_df = self.barcoded_fov_df.append(self.counts, ignore_index=True)
self.barcoded_fov_df['barcodes_extraction_resolution'] = self.barcodes_extraction_resolution
self.grpd = self.barcoded_fov_df.groupby('barcode_reference_dot_id')
barcode_reference_dot_id_list = []
num_unique_dots = np.unique(self.barcoded_fov_df.loc[:,'barcode_reference_dot_id']).shape[0]
                # Proceed only if there are dots in the df
if num_unique_dots > 0:
all_barcodes = np.zeros([num_unique_dots,self.barcode_length],dtype=np.int8)
for idx, (name, group) in enumerate(self.grpd):
barcode_reference_dot_id_list.append(name)
barcode = np.zeros([self.barcode_length],dtype=np.int8)
rounds_num = group.round_num.values
rounds_num = rounds_num.astype(int)
barcode[(rounds_num-1)] += 1
all_barcodes[idx,:] = barcode
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
for idx,name in enumerate(barcode_reference_dot_id_list):
barcode = all_barcodes[idx,:]
gene = genes[idx]
hd = dists_arr[idx][0]
cols = ['raw_barcodes','all_Hdistance_genes','number_positive_bits','hamming_distance'] # will add last column depending on hd
writing_data = [barcode.tostring(),gene,barcode.sum(),hd]
if hd == 0:
cols = cols + ['zeroHdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_2:
cols = cols + ['below2Hdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_3:
cols = cols + ['below3Hdistance_genes']
writing_data = writing_data + [gene]
self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,cols] = writing_data
class extract_barcodes_NN_new():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
experiment_config: Dict
dictionary with the experimental data
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, registered_counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df):
self.counts_df = registered_counts
self.analysis_parameters = analysis_parameters
self.experiment_config = experiment_config
self.codebook_df = codebook_df
self.logger = selected_logger()
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = self.counts_df.loc[0]['barcode_length']
self.registration_errors = Registration_errors()
self.stitching_channel = self.counts_df['stitching_channel'].iloc[0]
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
column_names = list(counts_df.columns.values)
        column_names.append('barcode_reference_dot_id')  # append in place; assigning the None return value would drop the column names
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in round 1)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
for k,v in groupby(idx_selected_coords_compare):
if len(list(v)) > 3:
print("key: '{}'--> group: {}".format(k, len(list(v))))
# ref_idx = indices[idx_selected_coords_compare].squeeze()
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
self.barcoded_spec = data_models.barcode_analysis_df
if not self.counts_df[self.counts_df['dot_id'].isnull()].empty:
print('shitty FOV')
self.all_combine_df = pd.concat([self.counts_df,self.barcoded_spec],axis=1)
elif (min(self.counts_df['min_number_matching_dots_registration']) < self.RegistrationMinMatchingBeads):
self.counts_df['min_number_matching_dots_registration'] = registration_errors.registration_below_extraction_resolution
self.all_combine_df = pd.concat([self.counts_df,self.barcoded_spec],axis=1)
else:
self.counts_df = | pd.concat([self.counts_df,self.barcoded_spec],axis=1) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
##-------- [PPC] Jobshop Scheduling ---------
# * Author: <NAME>
# * Date: Apr 30th, 2020
# * Description:
# Using the event-driven scheduling method
# to solve the JSS prob. Here is a sample
# code with the style of OOP. Feel free to
# modify it as you like.
##--------------------------------------------
#
import os
import numpy as np
import pandas as pd
from gantt_plot import Gantt
#entity
class Order:
def __init__(self, ID, AT, DD, routing, PT):
self.ID = ID
self.AT = AT #AT: arrival time
self.DD = DD #DD: due date
self.PT = PT #PT: processing time
self.routing = routing
self.progress = 0
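# Illustrative instantiation (editorial sketch; values are hypothetical):
#   order = Order(ID=1, AT=0, DD=20, routing=['M1', 'M2'], PT=[5, 3])
#   # the order arrives at t=0, is due at t=20, and visits M1 (5 time units) then M2 (3)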
#resource in factory
class Source:
def __init__(self, order_info):
self.order_info = order_info
self.output = 0
def arrival_event(self, fac):
raise NotImplementedError
class Machine:
def __init__(self, ID, DP_rule):
self.ID = ID
self.state = 'idle'
self.buffer = []
self.wspace = [] #wspace: working space
self.DP_rule = DP_rule
def start_processing(self, fac):
raise NotImplementedError
def end_process_event(self, fac):
raise NotImplementedError
class Factory:
def __init__(self, order_info, DP_rule):
self.order_info = order_info
self.DP_rule = DP_rule
self.event_lst = | pd.DataFrame(columns=["event_type", "time"]) | pandas.DataFrame |
from rdkit import Chem
import numpy as np
import pandas as pd
def mol2bit(MOLS):
BIT = []
FP = []
for i, mol in enumerate(MOLS):
if mol is not None:
bit = {}
fp = Chem.RDKFingerprint(mol, bitInfo=bit)
BIT.append(bit)
FP.append(fp)
else:
BIT.append(np.nan)
FP.append(np.nan)
print(i)
return BIT
def bit2df(BIT):
df = pd.DataFrame(np.zeros((len(BIT),2048), dtype=int))
for i in range(len(BIT)):
if type(BIT[i])==float:
df.loc[i,:] = np.nan
else:
bit = list(BIT[i].keys())
df.loc[i,bit] = int(1)
return df
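# Illustrative usage (editorial sketch; SMILES values are hypothetical):
#   mols = [Chem.MolFromSmiles(s) for s in ['CCO', 'c1ccccc1']]
#   bits = mol2bit(mols)          # per-molecule RDKit fingerprint bitInfo dicts
#   fp_df = bit2df(bits)          # (2, 2048) dataframe with 1s at the set fingerprint bits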
if __name__ == '__main__':
path = './data'
df = | pd.read_csv(f"{path}/SMILES.csv") | pandas.read_csv |
import logging
import time
import json
import requests
import pytz
from datetime import datetime, timedelta
import tushare as ts
import pandas as pd
from decimal import Decimal
from firestone_engine.Utils import Utils
from bson.objectid import ObjectId
class ConceptPick(object):
_logger = logging.getLogger(__name__)
UTC_8 = pytz.timezone('Asia/Shanghai')
_BATCH_SIZE = 50
_MAX_SIZE = 150
def run(self, trade, config, db, is_mock):
if(len(config['monitor_concept']) >= int(trade['params']['max_concept'])):
            ConceptPick._logger.info('exceeded max concepts number, stop monitoring')
return
self.is_mock = is_mock
self.db = db
self.monitor_codes = []
today = datetime.now()
self.today = '{}-{}-{}'.format(today.year,('0' + str(today.month))[-2:],('0' + str(today.day))[-2:])
self.trade = trade
self.config = config
if(not self.match_monitorTime()):
return
self.get_match_concepts()
self.pick_all_match_stocks()
if(len(self.monitor_codes) > 0):
self.updateTrade({'result' : f'创建监控:{self.monitor_codes}'})
def match_monitorTime(self):
start = datetime.strptime('{} {}:00'.format(self.today, self.trade['params']['monitorTime']['start']), '%Y-%m-%d %H:%M:%S')
end = datetime.strptime('{} {}:00'.format(self.today, self.trade['params']['monitorTime']['end']), '%Y-%m-%d %H:%M:%S')
now = datetime.now()
return now >= start and now <= end
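    # Editorial note (illustrative): with trade['params']['monitorTime'] == {'start': '09:35', 'end': '14:50'},
    # match_monitorTime() returns True only while the local time is between 09:35:00 and 14:50:00 today.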
def need_create_order(self):
return False
def get_match_concepts(self):
try:
load_time = datetime.now(tz=ConceptPick.UTC_8) - timedelta(minutes=2)
hot_concepts = list(self.db['hot_concept'].find({'time' : {'$gte' : load_time}}).sort([('time', -1)]).limit(10))
if(len(hot_concepts) == 0):
self.match_concepts = []
ConceptPick._logger.warning('failed to get hot concepts')
return
concepts = []
for concept in hot_concepts:
if(float(concept['index_percent'][:-1]) < float(self.trade['params']['index_percent']) or float(concept['index_percent'][:-1]) >= float(self.trade['params']['index_max_percent']) or int(concept['company_count']) > int(self.trade['params']['company_count']) or float(concept['stock_percent'][:-1]) < float(self.trade['params']['stock_percent']) or float(concept['net_buy']) < float(self.trade['params']['net_buy'])):
continue
if(self.trade['params']['concepts'] != "" and self.trade['params']['concepts'].find(concept['name']) < 0):
continue
concepts.append(concept)
self.match_concepts = concepts
ConceptPick._logger.info(f'match hot concepts are {concepts}')
except Exception as e:
ConceptPick._logger.error(f'get match concept failed, e = {e}')
self.match_concepts = []
def pick_all_match_stocks(self):
if(len(self.match_concepts) == 0):
ConceptPick._logger.warning(f'no match hot concepts')
return
for concept in self.match_concepts:
try:
if(concept['name'] not in self.config['monitor_concept']):
zxs = list(self.db['zx'].find({"concept" : concept['name']}))
if(len(zxs) > 0 and len(zxs[0]['codes']) > 0):
select_codes = self.filter_select_codes(concept, zxs[0]['codes'])[:ConceptPick._BATCH_SIZE]
if(len(select_codes) > 0):
self.start_monitor(select_codes)
self.pick_match_stocks(concept)
except Exception as e:
ConceptPick._logger.error(f"pick_match_stocks for concept {concept['name']} failed, e = {e}")
def filter_select_codes(self, concept, codes):
time.sleep(3)
df = ts.get_realtime_quotes(codes)
df['percent'] = (df['price'].astype(float) - df['pre_close'].astype(float)) / df['pre_close'].astype(float) * 100
index_percent = float(concept['index_percent'][:-1])
df = df[(~df['code'].str.startswith('688')) & (df['percent'] > index_percent) & (df['percent'] <= float(self.trade['params']['max_percent']))]
if(len(df) == 0):
ConceptPick._logger.warning(f"no match select stocks for concept {concept['name']}")
return []
df = df.sort_values('percent')
return list(df['code'])[0:int(self.trade['params']['monitor_count'])]
def pick_match_stocks(self, concept):
condition = []
for i in range(int(self.trade['params']['top_concept'])):
condition.append({f'concepts.{i}' : concept['name']})
stocks = list(self.db['concepts'].find({'$or' : condition}))
if(len(stocks) == 0):
ConceptPick._logger.warning(f"no match stocks for concept {concept['name']}")
return
self.filter_stocks(stocks, concept)
def rank_concept(self, stock, hot_concept):
concepts = stock['concepts']
stock['rank_concept'] = [int(k) for k, concept in concepts.items() if concept == hot_concept['name']][0]
def filter_stocks(self, stocks, concept):
result = []
for stock in stocks:
now = datetime.now()
if(stock['type'] in ['大盘股','超大盘股']):
continue
if(stock['xsjj'] is not None and stock['xsjj'] != ''):
xsjj = datetime.strptime(stock['xsjj'], '%Y-%m-%d')
if(xsjj > now and (xsjj - now).days < 30):
continue
self.rank_concept(stock, concept)
result.append(stock)
if(len(result) == 0):
ConceptPick._logger.warning(f"no match stocks for concept {concept['name']} after filter")
return
result.sort(key=lambda x: x['rank_concept'])
self.get_top_rank_stocks(result, concept)
def get_top_rank_stocks(self, stocks, concept):
full_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import covasim as cv # Version used in our study is 3.07
import random
from causal_testing.specification.causal_dag import CausalDAG
from causal_testing.specification.scenario import Scenario
from causal_testing.specification.variable import Input, Output
from causal_testing.specification.causal_specification import CausalSpecification
from causal_testing.data_collection.data_collector import ExperimentalDataCollector
from causal_testing.testing.causal_test_case import CausalTestCase
from causal_testing.testing.causal_test_outcome import Positive, Negative, NoEffect
from causal_testing.testing.intervention import Intervention
from causal_testing.testing.causal_test_engine import CausalTestEngine
from causal_testing.testing.estimators import LinearRegressionEstimator
def experimental_causal_test_vaccinate_elderly(runs_per_test_per_config: int = 30, verbose: bool = False):
""" Run the causal test case for the effect of changing vaccine to prioritise elderly. This uses the experimental
data collector.
:param runs_per_test_per_config: Number of times to run each input configuration (control and treatment) per test.
Hence, the total number of runs per test will be twice this value.
:param verbose: Whether to print verbose details (causal test results).
:return results_dict: A dictionary containing ATE, 95% CIs, and Test Pass/Fail
"""
# 1. Read in the Causal DAG
causal_dag = CausalDAG('dag.dot')
# 2. Create variables
pop_size = Input('pop_size', int)
pop_infected = Input('pop_infected', int)
n_days = Input('n_days', int)
vaccine = Input('vaccine', int)
cum_infections = Output('cum_infections', int)
cum_vaccinations = Output('cum_vaccinations', int)
cum_vaccinated = Output('cum_vaccinated', int)
max_doses = Output('max_doses', int)
# 3. Create scenario by applying constraints over a subset of the input variables
scenario = Scenario(variables={pop_size, pop_infected, n_days, cum_infections, vaccine,
cum_vaccinated, cum_vaccinations, max_doses},
constraints={pop_size.z3 == 50000, pop_infected.z3 == 1000, n_days.z3 == 50})
# 4. Construct a causal specification from the scenario and causal DAG
causal_specification = CausalSpecification(scenario, causal_dag)
# 5. Instantiate the experimental data collector for Covasim
covasim_parameters_dict = {'pop_size': 50000,
'pop_type': 'hybrid',
'pop_infected': 1000,
'n_days': 50}
control_input_configuration = {'covasim_parameters_dict': covasim_parameters_dict,
'target_elderly': False}
treatment_input_configuration = {'covasim_parameters_dict': covasim_parameters_dict,
'target_elderly': True}
data_collector = CovasimVaccineDataCollector(scenario, control_input_configuration,
treatment_input_configuration,
runs_per_test_per_config)
# 6. Express expected outcomes
expected_outcome_effects = {cum_infections: Positive(),
cum_vaccinations: Negative(),
cum_vaccinated: Negative(),
max_doses: NoEffect()
}
results_dict = {'cum_infections': {},
'cum_vaccinations': {},
'cum_vaccinated': {},
'max_doses': {}
}
for outcome_variable, expected_effect in expected_outcome_effects.items():
causal_test_case = CausalTestCase(control_input_configuration={vaccine: 0},
expected_causal_effect=expected_effect,
treatment_input_configuration={vaccine: 1},
outcome_variables={outcome_variable})
# 7. Create an instance of the causal test engine
causal_test_engine = CausalTestEngine(causal_test_case, causal_specification, data_collector)
# 8. Obtain the minimal adjustment set for the causal test case from the causal DAG
minimal_adjustment_set = causal_test_engine.load_data(index_col=0)
# 9. Build statistical model
linear_regression_estimator = LinearRegressionEstimator((vaccine.name,), 1, 0,
minimal_adjustment_set,
(outcome_variable.name,))
# 10. Execute test and save results in dict
causal_test_result = causal_test_engine.execute_test(linear_regression_estimator, 'ate')
if verbose:
print(f"Causation:\n{causal_test_result}")
results_dict[outcome_variable.name]['ate'] = causal_test_result.ate
results_dict[outcome_variable.name]['cis'] = causal_test_result.confidence_intervals
results_dict[outcome_variable.name]['test_passes'] = causal_test_case.expected_causal_effect.apply(
causal_test_result)
return results_dict
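# Illustrative usage (editorial sketch):
#   results = experimental_causal_test_vaccinate_elderly(runs_per_test_per_config=30, verbose=True)
#   print(results['cum_infections']['ate'], results['cum_infections']['test_passes'])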
class CovasimVaccineDataCollector(ExperimentalDataCollector):
"""A custom experimental data collector for the elderly vaccination Covasim case study.
This experimental data collector runs covasim with a normal Pfizer vaccine and then again with the same vaccine but
this time prioritising the elderly for vaccination.
"""
def run_system_with_input_configuration(self, input_configuration: dict) -> pd.DataFrame:
""" Run the system with a given input configuration.
:param input_configuration: A nested dictionary containing Covasim parameters, desired number of repeats, and
a bool to determine whether elderly should be prioritised for vaccination.
:return: A dataframe containing results for this input configuration.
"""
results_df = self.simulate_vaccine(input_configuration['covasim_parameters_dict'],
self.n_repeats,
input_configuration['target_elderly'])
return results_df
def simulate_vaccine(self, pars_dict: dict, n_simulations: int = 100, target_elderly: bool = False):
""" Simulate observational data that contains a vaccine that is optionally given preferentially to the elderly.
:param pars_dict: A dictionary containing simulation parameters.
:param n_simulations: Number of simulations to run.
:param target_elderly: Whether to prioritise vaccination for the elderly.
:return: A pandas dataframe containing results for each run.
"""
simulations_results_dfs = []
for sim_n in range(n_simulations):
print(f'Simulation {sim_n + 1}/{n_simulations}.')
# Update simulation parameters with vaccine and optionally sub-target
if target_elderly:
print("Prioritising the elderly for vaccination")
vaccine = cv.vaccinate_prob(vaccine="Pfizer", label="prioritise_elderly",
subtarget=self.vaccinate_by_age, days=list(range(7, pars_dict['n_days'])))
else:
print("Using standard vaccination protocol")
vaccine = cv.vaccinate_prob(vaccine="Pfizer", label="regular", days=list(range(7, pars_dict['n_days'])))
pars_dict['interventions'] = vaccine
pars_dict['use_waning'] = True # Must be set to true for vaccination
sim_results_df = self.run_sim_with_pars(pars_dict=pars_dict,
desired_outputs=['cum_infections', 'cum_deaths', 'cum_recoveries',
'cum_vaccinations', 'cum_vaccinated'],
n_runs=1)
sim_results_df['interventions'] = vaccine.label # Store label in results instead of vaccine object
sim_results_df['target_elderly'] = target_elderly
sim_results_df['vaccine'] = int(target_elderly) # 0 if standard vaccine, 1 if target elderly vaccine
sim_results_df['max_doses'] = vaccine.p['doses'] # Get max doses for the vaccine
simulations_results_dfs.append(sim_results_df)
# Create a single dataframe containing a row for every execution
obs_df = pd.concat(simulations_results_dfs, ignore_index=True)
obs_df.rename(columns={'interventions': 'vaccine_type'}, inplace=True)
return obs_df
@staticmethod
def run_sim_with_pars(pars_dict: dict, desired_outputs: [str], n_runs: int = 1, verbose: int = -1):
""" Runs a Covasim COVID-19 simulation with a given dict of parameters and collects the desired outputs,
which are given as a list of output names.
:param pars_dict: A dictionary containing the parameters and their values for the run.
:param desired_outputs: A list of outputs which should be collected.
:param n_runs: Number of times to run the simulation with a different seed.
:param verbose: Covasim verbose setting (0 for no output, 1 for output).
:return results_df: A pandas df containing the results for each run
"""
results_dict = {k: [] for k in list(pars_dict.keys()) + desired_outputs + ['rand_seed']}
for _ in range(n_runs):
# For every run, generate and use a new a random seed.
# This is to avoid using Covasim's sequential random seeds.
random.seed()
rand_seed = random.randint(0, 10000)
pars_dict['rand_seed'] = rand_seed
print(f"Rand Seed: {rand_seed}")
sim = cv.Sim(pars=pars_dict)
m_sim = cv.MultiSim(sim)
m_sim.run(n_runs=1, verbose=verbose, n_cpus=1)
for run in m_sim.sims:
results = run.results
# Append inputs to results
for param in pars_dict.keys():
results_dict[param].append(run.pars[param])
# Append outputs to results
for output in desired_outputs:
if output not in results:
raise IndexError(f'{output} is not in the Covasim outputs.')
results_dict[output].append(
results[output][-1]) # Append the final recorded value for each variable
# Any parameters without results are assigned np.nan for each execution
for param, results in results_dict.items():
if not results:
results_dict[param] = [np.nan] * len(results_dict['rand_seed'])
return | pd.DataFrame(results_dict) | pandas.DataFrame |
"""Remove images in blacklist from all other datasets"""
import argparse
from pathlib import Path
from typing import *
import pandas as pd
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('dataset_dir', type=str, default='data/datasets/')
parser.add_argument('blacklist', type=str)
return parser.parse_args()
def load_dataset(path):
return list(pd.read_csv(path, header=None, names=['image_name']).image_name)
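# Illustrative usage (editorial sketch; the path and names are hypothetical):
#   names = load_dataset('data/datasets/train.csv')   # -> ['img_001.png', 'img_002.png', ...]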
def save_dataset(image_names: List[str], path):
images_df = | pd.DataFrame({'image_name': image_names}) | pandas.DataFrame |
import cv2
from pygame import mixer
import pandas as pd
from config import GameConfig
import time
config = GameConfig()
def play_music(music_file, time=0.0):  # music playback function
mixer.init()
mixer.music.load(music_file)
mixer.music.play(1, time)
# clock = pygame.time.Clock()
# clock.tick(10)
def play_sound(sound_file):  # sound effect function
mixer.init()
sound = mixer.Sound(sound_file)
mixer.Sound.play(sound)
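# Illustrative usage (editorial sketch; the file names are the ones defined below):
#   play_sound('musics/sound1.wav')                          # one-shot effect
#   play_music('musics/sunset_glow_teaser.wav', time=10.0)   # background music starting 10 s in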
# sound effect files
sound_effect1 = 'musics/sound1.wav'
sound_effect2 = 'musics/sound2.wav'
sound_applause = 'musics/applause.wav'
sound_disappointed = 'musics/disappointed.wav'
def main_menu(config, params):
'''
:param config: initial configuration
    :param params: of these, diff and exit are used
    :return: None
    '''
    diff = None  # set later to 'easy' or 'hard'
    exit = None  # True or False
    # main menu background image
bgImg = cv2.imread('images/main_menu.png')
# bgImg = cv2.resize(bgImg, (1080, 720))
    # menu screens shown when each button is pressed
button1 = cv2.imread('./images/easy_preview.png')
button2 = cv2.imread('./images/easy_start.png')
button3 = cv2.imread('./images/hard_preview.png')
button4 = cv2.imread('./images/hard_start.png')
buttonQ = cv2.imread('./images/quit.png')
    # variables defined outside the while loop so the pressed-button state persists
    key = 0
    keyQ = 0
    # load the teaser music tracks
sunset_teaser = 'musics/sunset_glow_teaser.wav'
bingo_teaser = 'musics/bingo_teaser.wav'
# 처음 시작할 때는 기본메뉴화면으로 화면 설정
config.named_window = bgImg
while True:
a = cv2.waitKey(0)
        if a & 0xFF == ord('1'):  # when '1' is pressed
play_sound(sound_effect1)
diff = 'easy'
play_music(sunset_teaser)
key = 1
config.named_window = button1
if a & 0xFF == ord('2'):
play_sound(sound_effect2)
diff = 'easy'
key = 2
config.named_window = button2
if a & 0xFF == ord('3'):
#play_sound(sound_effect1)
diff = 'hard'
play_music(bingo_teaser)
key = 3
config.named_window = button3
if a & 0xFF == ord('4'):
play_sound(sound_effect2)
diff = 'hard'
key = 4
config.named_window = button4
if a & 0xFF == ord('q'):
exit = True
key = 5
config.named_window = buttonQ
        # keep showing the screen selected by the stored key
if key == 1:
config.named_window = button1
if key == 2:
config.named_window = button2
if key == 3:
config.named_window = button3
if key == 4:
config.named_window = button4
if key == 5:
config.named_window = buttonQ
        # display the current screen
cv2.imshow('McgBcg', config.named_window)
if exit == True:
print('Exit')
break
if params["exit"] is True: # main menu에서 종료 버튼
print('Exit')
break
if key == 2:
print('Easy')
break
if key == 4:
print('Hard')
break
params["diff"] = diff
params["exit"] = exit
print("diff = ", diff, "exit = ", exit) # ex) diff = easy exit = False
params["restart"] = False
params["menu"] = False
params["resume"] = False
cv2.destroyAllWindows()
def get_number(list):
ret_list = []
for i in range(len(list)):
ret_list.append(list[i][0:11])
return ret_list
def load_pattern(config, params):
'''
:param config:
    :param params: of these, diff and pattern are used
:return: None
'''
pattern = None
    # code to write from here until the comment box below!
    df = None  # one cell read from the excel file
    data = None  # value read from the excel file
print("Choose easy / hard \n")
diff = params["diff"]
# def excel(diff):
if diff == 'easy':
df = | pd.read_excel('sunset_glow.xlsx', sheet_name='sunset') | pandas.read_excel |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/12/15 15:18
Desc: Eastmoney - Data Center - Featured Data - Persons Acting in Concert (一致行动人)
http://data.eastmoney.com/yzxdr/
"""
import demjson
import pandas as pd
import requests
def stock_em_yzxdr(date: str = "20200930") -> pd.DataFrame:
"""
    Eastmoney - Data Center - Featured Data - Persons Acting in Concert (一致行动人)
    http://data.eastmoney.com/yzxdr/
    :param date: quarter-end date of each year
    :type date: str
    :return: persons-acting-in-concert data
:rtype: pandas.DataFrame
"""
date = '-'.join([date[:4], date[4:6], date[6:]])
url = "http://datacenter.eastmoney.com/api/data/get"
params = {
"type": "RPTA_WEB_YZXDRINDEX",
"sty": "ALL",
"source": "WEB",
"p": "1",
"ps": "500",
"st": "noticedate",
"sr": "-1",
"var": "mwUyirVm",
"filter": f"(enddate='{date}')",
"rt": "53575609",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
total_pages = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, total_pages+1):
params = {
"type": "RPTA_WEB_YZXDRINDEX",
"sty": "ALL",
"source": "WEB",
"p": str(page),
"ps": "500",
"st": "noticedate",
"sr": "-1",
"var": "mwUyirVm",
"filter": f"(enddate='{date}')",
"rt": "53575609",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
temp_df = | pd.DataFrame(data_json["result"]["data"]) | pandas.DataFrame |
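# Illustrative call (editorial sketch):
#   yzxdr_df = stock_em_yzxdr(date="20200930")   # quarter-end date; returns one row per filing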
"""The search module of elsapy.
Additional resources:
* https://github.com/ElsevierDev/elsapy
* https://dev.elsevier.com
* https://api.elsevier.com"""
import xmltodict
from . import log_util
from urllib.parse import quote_plus as url_encode
import pandas as pd, json
from .utils import recast_df
import string
from pathlib import Path
import os
logger = log_util.get_logger(__name__)
class ElsSearch():
"""Represents a search to one of the search indexes accessible
through api.elsevier.com. Returns True if successful; else, False."""
# static / class variables
_base_url = u'https://api.elsevier.com/content/search/'
_cursored_indexes = [
'scopus',
]
def __init__(self, query, index):
"""Initializes a search object with a query and target index."""
self.query = query
self.index = index
self.num_records = 25
self._cursor_supported = (index in self._cursored_indexes)
self._uri = self._base_url + self.index + '?query=' + url_encode(
self.query) + '&count=' + str(self.num_records)
columns = ['@_fa','link','prism:url','dc:identifier','eid','dc:title','prism:aggregationType','subtype',
'subtypeDescription','citedby-count','prism:publicationName','prism:isbn','prism:issn',
'prism:volume','prism:issueIdentifier','prism:pageRange','prism:coverDate','prism:coverDisplayDate',
'prism:doi','pii','pubmed-id','orcid','dc:creator','openaccess','affiliation','author','dc:description',
'authkeywords','article-number','fund-acr','fund-no','fund-sponsor','prism:eIssn', 'abstract_text']
self.results_df = pd.DataFrame(columns=columns)
self.max_file_size = 100 * 1000 # first number in KB
# properties
@property
def query(self):
"""Gets the search query"""
return self._query
@query.setter
def query(self, query):
"""Sets the search query"""
self._query = query
@property
def index(self):
"""Gets the label of the index targeted by the search"""
return self._index
@index.setter
def index(self, index):
"""Sets the label of the index targeted by the search"""
self._index = index
@property
def results(self):
"""Gets the results for the search"""
return self._results
@property
def tot_num_res(self):
"""Gets the total number of results that exist in the index for
this query. This number might be larger than can be retrieved
and stored in a single ElsSearch object (i.e. 5,000)."""
return self._tot_num_res
@property
def num_res(self):
"""Gets the number of results for this query that are stored in the
search object. This number might be smaller than the number of
results that exist in the index for the query."""
return len(self.results)
@property
def uri(self):
"""Gets the request uri for the search"""
return self._uri
def _upper_limit_reached(self):
"""Determines if the upper limit for retrieving results from of the
search index is reached. Returns True if so, else False. Upper
limit is 5,000 for indexes that don't support cursor-based
pagination."""
if self._cursor_supported:
return False
else:
return self.num_res >= 5000
def execute(self, els_client = None, get_all = False):
"""Executes the search. If get_all = False (default), this retrieves
the default number of results specified for the API. If
get_all = True, multiple API calls will be made to iteratively get
all results for the search, up to a maximum of 5,000."""
## TODO: add exception handling
api_response = els_client.exec_request(self._uri)
abstracts_index = 0
self._tot_num_res = int(api_response['search-results']['opensearch:totalResults'])
self._results = api_response['search-results']['entry']
self.add_abstracts(els_client, abstracts_index)
self.results_df = self.results_df.append(recast_df(pd.DataFrame(self._results)))
csv_filename_number = 0
csv_filename = "output/test"+ str(csv_filename_number) + ".csv"
self.results_df.to_csv(csv_filename, mode='a', sep=',', index=False, encoding="utf-8", header=not os.path.exists(csv_filename))
if get_all is True:
while (self.num_res < self.tot_num_res): #and not self._upper_limit_reached():
# breakpoint()
for e in api_response['search-results']['link']:
if e['@ref'] == 'next':
next_url = e['@href']
# next_url = next_url.replace('scopus?start=', 'scopus?cursor=')
api_response = els_client.exec_request(next_url)
self._results = api_response['search-results']['entry']
self.add_abstracts(els_client, abstracts_index)
            self.results_df = self.results_df.append(recast_df(pd.DataFrame(self._results)))
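            # (The source is truncated here; later pages would be accumulated
            # in the same way as the first batch above.)


# Illustrative usage sketch added for clarity; it is not part of the original
# module. The API key is a placeholder assumption, ElsClient comes from
# elsapy.elsclient, and the class's helper methods (e.g. add_abstracts) are
# assumed to be defined elsewhere in the package.
if __name__ == '__main__':  # pragma: no cover
    from elsapy.elsclient import ElsClient
    client = ElsClient('REPLACE_WITH_API_KEY')
    search = ElsSearch('TITLE-ABS-KEY(perovskite)', 'scopus')
    search.execute(client, get_all=False)
    logger.info('%s of %s results retrieved', search.num_res, search.tot_num_res)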
import unittest
import utils
import datetime
import pandas as pd
import numpy as np
from pandas.testing import *
# sample data and their expected outputs
data1={'meta': {'currency': 'USD', 'symbol': 'TSM', 'exchangeName': 'NYQ', 'instrumentType': 'EQUITY', 'firstTradeDate': 876403800, 'regularMarketTime': 1616529601, 'gmtoffset': -14400, 'timezone': 'EDT', 'exchangeTimezoneName': 'America/New_York', 'regularMarketPrice': 114.89, 'chartPreviousClose': 77.92, 'priceHint': 2, 'currentTradingPeriod': {'pre': {'timezone': 'EDT', 'start': 1616572800, 'end': 1616592600, 'gmtoffset': -14400}, 'regular': {'timezone': 'EDT', 'start': 1616592600, 'end': 1616616000, 'gmtoffset': -14400}, 'post': {'timezone': 'EDT', 'start': 1616616000, 'end': 1616630400, 'gmtoffset': -14400}}, 'dataGranularity': '1d', 'range': '6mo', 'validRanges': ['1d', '5d', '1mo', '3mo', '6mo', '1y', '2y', '5y', '10y', 'ytd', 'max']}, 'events': {'dividends': {'1615987800': {'amount': 0.448, 'date': 1615987800}, '1608215400': {'amount': 0.442, 'date': 1608215400}}}}
data2={'meta': {'currency': 'USD', 'symbol': 'TSM', 'exchangeName': 'NYQ', 'instrumentType': 'EQUITY', 'firstTradeDate': 876403800, 'regularMarketTime': 1616529601, 'gmtoffset': -14400, 'timezone': 'EDT', 'exchangeTimezoneName': 'America/New_York', 'regularMarketPrice': 114.89, 'chartPreviousClose': 77.92, 'priceHint': 2, 'currentTradingPeriod': {'pre': {'timezone': 'EDT', 'start': 1616572800, 'end': 1616592600, 'gmtoffset': -14400}, 'regular': {'timezone': 'EDT', 'start': 1616592600, 'end': 1616616000, 'gmtoffset': -14400}, 'post': {'timezone': 'EDT', 'start': 1616616000, 'end': 1616630400, 'gmtoffset': -14400}}, 'dataGranularity': '1d', 'range': '6mo', 'validRanges': ['1d', '5d', '1mo', '3mo', '6mo', '1y', '2y', '5y', '10y', 'ytd', 'max']}}
data3={'meta': {'currency': 'EUR', 'symbol': 'UH7.F', 'exchangeName': 'FRA', 'instrumentType': 'EQUITY', 'firstTradeDate': 1140678000, 'regularMarketTime': 1584084000, 'gmtoffset': 3600, 'timezone': 'CET', 'exchangeTimezoneName': 'Europe/Berlin', 'regularMarketPrice': 0.0005, 'chartPreviousClose': 0.0005, 'priceHint': 4, 'currentTradingPeriod': {'pre': {'timezone': 'CET', 'end': 1616569200, 'start': 1616569200, 'gmtoffset': 3600}, 'regular': {'timezone': 'CET', 'end': 1616619600, 'start': 1616569200, 'gmtoffset': 3600}, 'post': {'timezone': 'CET', 'end': 1616619600, 'start': 1616619600, 'gmtoffset': 3600}}, 'dataGranularity': '1d', 'range': '6mo', 'validRanges': ['1d', '5d', '1mo', '3mo', '6mo', '1y', '2y', '5y', '10y', 'ytd', 'max']}, 'events': {'splits': {'1616482800': {'date': 1616482800, 'numerator': 1, 'denominator': 50, 'splitRatio': '1:50'}}}}
data4={'meta': {'currency': 'USD', 'symbol': 'AAPL', 'exchangeName': 'NMS', 'instrumentType': 'EQUITY', 'firstTradeDate': 345479400, 'regularMarketTime': 1616616002, 'gmtoffset': -14400, 'timezone': 'EDT', 'exchangeTimezoneName': 'America/New_York', 'regularMarketPrice': 120.09, 'chartPreviousClose': 108.22, 'priceHint': 2, 'currentTradingPeriod': {'pre': {'timezone': 'EDT', 'start': 1616659200, 'end': 1616679000, 'gmtoffset': -14400}, 'regular': {'timezone': 'EDT', 'start': 1616679000, 'end': 1616702400, 'gmtoffset': -14400}, 'post': {'timezone': 'EDT', 'start': 1616702400, 'end': 1616716800, 'gmtoffset': -14400}}, 'dataGranularity': '1d', 'range': '6mo', 'validRanges': ['1d', '5d', '1mo', '3mo', '6mo', '1y', '2y', '5y', '10y', 'ytd', 'max']}, 'events': {'dividends': {'1604673000': {'amount': 0.205, 'date': 1604673000}, '1612535400': {'amount': 0.205, 'date': 1612535400}}}}
data5=''
data6={'meta': {'currency': 'USD', 'symbol': 'AAPL', 'exchangeName': 'NMS', 'instrumentType': 'EQUITY', 'firstTradeDate': 345479400, 'regularMarketTime': 1616616002, 'gmtoffset': -14400, 'timezone': 'EDT', 'exchangeTimezoneName': 'America/New_York', 'regularMarketPrice': 120.09, 'chartPreviousClose': 108.22, 'priceHint': 2, 'currentTradingPeriod': {'pre': {'timezone': 'EDT', 'start': 1616659200, 'end': 1616679000, 'gmtoffset': -14400}, 'regular': {'timezone': 'EDT', 'start': 1616679000, 'end': 1616702400, 'gmtoffset': -14400}, 'post': {'timezone': 'EDT', 'start': 1616702400, 'end': 1616716800, 'gmtoffset': -14400}}, 'dataGranularity': '1d', 'range': '6mo', 'validRanges': ['1d', '5d', '1mo', '3mo', '6mo', '1y', '2y', '5y', '10y', 'ytd', 'max']}, 'events': {'dividends': {'1604673000': {'amount': 0.205, 'date': 0}, '1612535400': {'amount': 0.205, 'date': 0}}}}
output1=pd.DataFrame(columns=["Dividends"])
output1=pd.DataFrame(data=[{'amount': 0.448, 'date': 1615987800},{'amount': 0.442, 'date': 1608215400}])
output1.set_index("date",inplace=True)
output1.index = pd.to_datetime(output1.index, unit="s")
output1.sort_index(inplace=True)
#dividends.index = dividends.index.tz_localize(tz)
output1.columns=["Dividends"]
class Test_parse_action(unittest.TestCase):
"""
test parse_actions function
"""
def test_dividend(self):
"""
        Test that dividends are returned in the correct order and format (i.e. parsed correctly).
"""
output1=pd.DataFrame(columns=["Dividends"])
output1=pd.DataFrame(data=[{'amount': 0.448, 'date': 1615987800},{'amount': 0.442, 'date': 1608215400}])
output1.set_index("date",inplace=True)
output1.index = pd.to_datetime(output1.index, unit="s")
output1.sort_index(inplace=True)
#dividends.index = dividends.index.tz_localize(tz)
output1.columns=["Dividends"]
        # case 1: data that contains two different dividends and no splits.
result=utils.parse_actions(data1)
self.assertNotEqual(result, None)#check if the returned result is None
self.assertFalse(result[0].empty)#check if the returned dividents is not empty
self.assertTrue(result[1].empty)#check if the returned splits returned is empty
assert_frame_equal(result[0],output1)#check if the data frame returned match with expected output
        # case 2: data with no dividend or split events
result2=utils.parse_actions(data2)
self.assertTrue(result2[0].empty)
self.assertTrue(result2[1].empty)
def test_split(self):
"""
        Test that data containing splits is parsed correctly.
"""
        # mock dataframe containing stock info with splits
output = pd.DataFrame(data=[{'date': 1616482800, 'numerator': 1, 'denominator': 50, 'splitRatio': '1:50'}])
output.set_index("date",inplace=True)
output.index = pd.to_datetime(output.index, unit="s")
output.sort_index(inplace=True)
output["Stock Splits"] = output["numerator"] / \
output["denominator"]
output = output["Stock Splits"]
        # case 3: data with only a split event
result=utils.parse_actions(data3) #call parse action function with the above data
self.assertNotEqual(result, None) #Check if the result is None
self.assertFalse(result[1].empty) #Check if the result contains splits
self.assertTrue(result[0].empty) #Check if the result does not contain dividends
assert_series_equal(result[1], output) #Check if the result matches the mock data
def test_dateTime(self):
"""
Test if date and time are in correct format
"""
output = pd.DataFrame(columns=["Dividends"])
output = pd.DataFrame(data=[{'amount': 0.205, 'date': 1604673000}, {'amount': 0.205, 'date': 1612535400}])
output.set_index("date",inplace=True)
output.index = pd.to_datetime(output.index, unit="s")
dividends, splits = utils.parse_actions(data4)
self.assertTrue(splits.empty)
self.assertFalse(dividends.empty)
self.assertEqual(dividends.index[0], output.index[0])
def test_emptyInput(self):
"""
        Test that a wrong (empty) input argument is handled gracefully.
"""
#output = pd.DataFrame(columns=["Dividends"])
#output = pd.DataFrame(data=[{'amount': 0.205, 'date': 1604673000}, {'amount': 0.205, 'date': 1612535400}])
output = pd.DataFrame(columns=["Dividends"])
output = pd.DataFrame(data=[''])
output.index = pd.to_datetime(output.index, unit="s")
result = utils.parse_actions(data5)
self.assertTrue(result[0].empty)
self.assertTrue(result[1].empty)
def test_wrongDate(self):
"""
        Test a date from before the electronic stock market was created.
"""
output = pd.DataFrame(columns=["Dividends"])
output = pd.DataFrame(data=[{'amount': 0.205, 'date': 1604673000}, {'amount': 0.205, 'date': 3308400}])
output.set_index("date",inplace=True)
        output.index = pd.to_datetime(output.index, unit="s")
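        # (The original test body is truncated here.) A minimal assumed check:
        # the pre-1971 timestamp should still convert to a valid datetime index.
        self.assertEqual(output.index[1].year, 1970)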
# -*- coding: utf-8 -*-
import inspect
import os # noqa: F401
import unittest
import time
import pandas as pd
from configparser import ConfigParser
from GenericsAPI.Utils.NetworkUtil import NetworkUtil
from GenericsAPI.GenericsAPIImpl import GenericsAPI
from GenericsAPI.GenericsAPIServer import MethodContext
from GenericsAPI.authclient import KBaseAuth as _KBaseAuth
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WorkspaceClient import Workspace as workspaceService
class NetworkUtilTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = os.environ.get('KB_AUTH_TOKEN', None)
config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('GenericsAPI'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'GenericsAPI',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL)
cls.serviceImpl = GenericsAPI(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
cls.dfu = DataFileUtil(cls.callback_url)
cls.network_util = NetworkUtil(cls.cfg)
suffix = int(time.time() * 1000)
cls.wsName = "test_network_util_" + str(suffix)
ret = cls.wsClient.create_workspace({'workspace': cls.wsName})
cls.wsId = ret[0]
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
return self.__class__.wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
def getNetworkUtil(self):
return self.__class__.network_util
def loadCorrData(self):
if hasattr(self.__class__, 'corr_data'):
return self.__class__.corr_data
corr_data = {'row_ids': ['WRI_RS00010_CDS_1',
'WRI_RS00015_CDS_1',
'WRI_RS00025_CDS_1'],
'values': [[1.0, 0.99, 0.91],
[0.99, 1.0, 0.91],
[0.91, 0.91, 1.0]],
'col_ids': ['WRI_RS00010_CDS_1',
'WRI_RS00015_CDS_1',
'WRI_RS00025_CDS_1']}
self.__class__.corr_data = corr_data
print('Loaded Correlation Data:\n{}\n'.format(corr_data))
return corr_data
def loadGraphDF(self):
if hasattr(self.__class__, 'graph_df'):
return self.__class__.graph_df
d = {'source': ['A', 'E', 'F', 'F', 'F', 'F', 'G', 'G', 'G', 'H',
'I', 'I', 'I', 'J', 'J', 'J'],
'target': ['E', 'A', 'G', 'H', 'I', 'J', 'F', 'I', 'J', 'F',
'F', 'G', 'J', 'F', 'G', 'I'],
'value': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6]}
        graph_df = pd.DataFrame(data=d)
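        # (The original method is truncated here.) Assumed completion that
        # mirrors loadCorrData above: cache the frame on the class and return it.
        self.__class__.graph_df = graph_df
        return graph_df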
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
    'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923 EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA.Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER).Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in either short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the glue table names that link FERC and EIA plants and
utilities together in PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) of names of
partition type (sub-key) and paritions (sub-value) containing the paritions
such as tuples of years for each data source that are able to be ingested
into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing table names (keys) and tuples of column names
(values) for integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? Removed from the DG table because it is not a real component.
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'price_responsive_programes': pd.BooleanDtype(),
'price_responsiveness_customers': pd.Int64Dtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'pv_current_flow_type': pd.CategoricalDtype(categories=['AC', 'DC']),
'reactive_power_output_mvar': float,
'real_time_pricing_program': pd.BooleanDtype(),
'rec_revenue': float,
'rec_sales_mwh': float,
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'reported_as_another_company': pd.StringDtype(),
'retail_marketing_activity': pd.BooleanDtype(),
'retail_sales': float,
'retail_sales_mwh': float,
'retirement_date': 'datetime64[ns]',
'revenue_class': pd.CategoricalDtype(categories=REVENUE_CLASSES),
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'rtos_of_operation': pd.StringDtype(),
'saidi_w_major_event_dats_minus_loss_of_service_minutes': float,
'saidi_w_major_event_days_minutes': float,
'saidi_wo_major_event_days_minutes': float,
'saifi_w_major_event_days_customers': float,
'saifi_w_major_event_days_minus_loss_of_service_customers': float,
'saifi_wo_major_event_days_customers': float,
'sales_for_resale': float,
'sales_for_resale_mwh': float,
'sales_mwh': float,
'sales_revenue': float,
'sales_to_ultimate_consumers_mwh': float,
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': | pd.StringDtype() | pandas.StringDtype |
import re
import pandas as pd
import numpy as np
class Resampler(object):
"""Resamples time-series data from one frequency to another frequency.
"""
min_in_freqs = {
'MIN': 1,
'MINUTE': 1,
'DAILY': 1440,
'D': 1440,
'HOURLY': 60,
'HOUR': 60,
'H': 60,
'MONTHLY': 43200,
'M': 43200,
'YEARLY': 525600
}
def __init__(self, data, freq, how='mean', verbosity=1):
"""
Arguments:
            data : the data to resample; anything accepted by ``pd.DataFrame``.
            freq : the frequency to transform/resample the data to.
            how : string or dictionary mapping column names in ``data`` to
                aggregation methods, defining how to resample each column.
            verbosity : verbosity level.
"""
data = pd.DataFrame(data)
self.orig_df = data.copy()
self.target_freq = self.freq_in_mins_from_string(freq)
self.how = self.check_how(how)
self.verbosity = verbosity
def __call__(self, *args, **kwargs):
if self.target_freq > self.orig_freq:
# we want to calculate at higher/larger time-step
return self.downsample()
else:
# we want to calculate at smaller time-step
return self.upsamle()
@property
def orig_freq(self):
return self.freq_in_mins_from_string(pd.infer_freq(self.orig_df.index))
@property
def allowed_freqs(self):
return self.min_in_freqs.keys()
def check_how(self, how):
if not isinstance(how, str):
assert isinstance(how, dict)
assert len(how) == len(self.orig_df.columns)
else:
assert isinstance(how, str)
how = {col:how for col in self.orig_df.columns}
return how
def downsample(self):
df = | pd.DataFrame() | pandas.DataFrame |
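# Illustrative usage sketch for the Resampler above (not part of the original
# file); it assumes ``Resampler`` is fully defined elsewhere and that the input
# DataFrame has a DatetimeIndex whose frequency pandas can infer, e.g.:
#
#     hourly = pd.DataFrame({"flow": range(48)},
#                           index=pd.date_range("2021-01-01", periods=48, freq="H"))
#     daily = Resampler(hourly, freq="D", how="mean")()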
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.api.indexers import (
BaseIndexer,
FixedForwardWindowIndexer,
)
from pandas.core.window.indexers import (
ExpandingIndexer,
FixedWindowIndexer,
VariableOffsetWindowIndexer,
)
from pandas.tseries.offsets import BusinessDay
def test_bad_get_window_bounds_signature():
class BadIndexer(BaseIndexer):
def get_window_bounds(self):
return None
indexer = BadIndexer()
with pytest.raises(ValueError, match="BadIndexer does not implement"):
Series(range(5)).rolling(indexer)
def test_expanding_indexer():
s = Series(range(10))
indexer = ExpandingIndexer()
result = s.rolling(indexer).mean()
expected = s.expanding().mean()
tm.assert_series_equal(result, expected)
def test_indexer_constructor_arg():
# Example found in computation.rst
use_expanding = [True, False, True, False, True]
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if self.use_expanding[i]:
start[i] = 0
end[i] = i + 1
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
result = df.rolling(indexer).sum()
expected = DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]})
tm.assert_frame_equal(result, expected)
def test_indexer_accepts_rolling_args():
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if center and min_periods == 1 and closed == "both" and i == 2:
start[i] = 0
end[i] = num_values
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1)
result = df.rolling(indexer, center=True, min_periods=1, closed="both").sum()
expected = DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
@pytest.mark.parametrize(
"func,np_func,expected,np_kwargs",
[
("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {}),
("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {}),
(
"max",
np.max,
[2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan],
{},
),
(
"std",
np.std,
[
1.0,
1.0,
1.0,
55.71654452,
54.85739087,
53.9845657,
1.0,
1.0,
0.70710678,
np.nan,
],
{"ddof": 1},
),
(
"var",
np.var,
[
1.0,
1.0,
1.0,
3104.333333,
3009.333333,
2914.333333,
1.0,
1.0,
0.500000,
np.nan,
],
{"ddof": 1},
),
(
"median",
np.median,
[1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 8.5, np.nan],
{},
),
],
)
@pytest.mark.filterwarnings("ignore:min_periods:FutureWarning")
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
# GH 32865
values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=3)
match = "Forward-looking windows can't have center=True"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, center=True)
getattr(rolling, func)()
match = "Forward-looking windows don't support setting the closed argument"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, closed="right")
getattr(rolling, func)()
rolling = constructor(values).rolling(window=indexer, min_periods=2)
result = getattr(rolling, func)()
# Check that the function output matches the explicitly provided array
expected = constructor(expected)
tm.assert_equal(result, expected)
# Check that the rolling function output matches applying an alternative
# function to the rolling window object
expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result, expected2)
# Check that the function output matches applying an alternative function
# if min_periods isn't specified
# GH 39604: After count-min_periods deprecation, apply(lambda x: len(x))
# is equivalent to count after setting min_periods=0
min_periods = 0 if func == "count" else None
rolling3 = constructor(values).rolling(window=indexer, min_periods=min_periods)
result3 = getattr(rolling3, func)()
expected3 = constructor(rolling3.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result3, expected3)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_rolling_forward_skewness(constructor):
values = np.arange(10.0)
values[5] = 100.0
indexer = | FixedForwardWindowIndexer(window_size=5) | pandas.api.indexers.FixedForwardWindowIndexer |
# import libraries
import streamlit as st
import pandas as pd
import plotly.express as px
import os
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
# set title for the dashboard
st.title('Sentiment Analysis of Tweets about US Airlines')
# set title for the dashboard sidebar
st.sidebar.title('Sentiment Analysis of Tweets about US Airlines')
# add markdown text under the dashboard title
st.markdown('This application is a Streamlit dashboard to analyze the sentiment\
of Tweets 🐦')
# add markdown text under the dashboard sidebar title
st.sidebar.markdown('This application is a Streamlit dashboard to analyze the\
sentiment of Tweets 🐦')
# define the dataset location
DATA_URL = os.path.join(os.getcwd(), 'tweets.csv')
# use the Streamlit cache decorator so the data is loaded once and reused across reruns
@st.cache(persist=True)
# function to load data
def load_data():
# load dataset
data = | pd.read_csv(DATA_URL) | pandas.read_csv |
import argparse
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--annotations_file", type=str, required=True,
help="CSV file of annotations.")
parser.add_argument("--mrsty_file", type=str, required=True,
help="Path to UMLS MRSTY.RRF")
parser.add_argument("--semgroups_file", type=str, required=True,
help="Path to UMLS semantic groups file.")
parser.add_argument("--outfile", type=str, required=True,
help="Where to save the summary.")
return parser.parse_args()
def main(annotations_file, mrsty_file, semgroups_file, outfile):
anns = | pd.read_csv(annotations_file) | pandas.read_csv |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
        # note: only the (idx1, exp1) pair is exercised; exp2/exp3/idx3 above are commented out
        for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
        idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
"""Create csv files with emotions in the first column and a column for every
expanded body part.
Usage: python generate_emotion2bodyparts.py <corpus metadata> <dir
with input texts> <output file (.csv)>
In addition to output.csv, a separate file is also written for each time period.
"""
import os
import argparse
from collections import Counter
from embem.machinelearningdata.count_labels import load_data, corpus_metadata
from embem.emotools.heem_utils import heem_body_part_labels, \
heem_emotion_labels
import pandas as pd
def get_emotion_body_part_pairs(file_name):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
emotions2body = {}
emotions = Counter()
for labelset in Y:
body_parts = [lb for lb in labelset if lb in heem_body_part_labels]
emotion_lbls = [lb for lb in labelset if lb in heem_emotion_labels]
if body_parts and emotion_lbls:
for em in emotion_lbls:
for bp in body_parts:
if not emotions2body.get(em):
emotions2body[em] = Counter()
emotions2body[em][bp] += 1
emotions[em] += 1
return emotions, emotions2body
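# Illustrative usage sketch (not part of the original script; the file name is an
# assumption about the HEEM-annotated input format expected by load_data):
#
#     emotions, emotions2body = get_emotion_body_part_pairs('texts/play_0001.txt')
#     print(emotions.most_common(3))                        # emotions most often paired with a body part
#     top_emotion = emotions.most_common(1)[0][0]
#     print(emotions2body[top_emotion].most_common(3))      # body parts most linked to that emotion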
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='csv file containing corpus metadata')
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
    parser.add_argument('out_file', help='csv file to write the emotion/body part counts to')
args = parser.parse_args()
f_name = args.in_file
input_dir = args.input_dir
out_file = args.out_file
text2period, text2year, text2genre, period2text, genre2text = \
corpus_metadata(f_name)
# statistics for entire corpus
global_emotions = Counter()
emotion_body_pairs = Counter()
period_counters = {}
periods = set()
# process texts
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
text_id = text_file.replace('.txt', '')
in_file = os.path.join(input_dir, text_file)
period = text2period.get(text_id)
periods.add(period)
emotions, emotions2body = get_emotion_body_part_pairs(in_file)
global_emotions.update(emotions)
        for em, body_counter in emotions2body.items():
if not emotion_body_pairs.get(em):
emotion_body_pairs[em] = Counter()
emotion_body_pairs[em].update(body_counter)
if not period_counters.get(em):
period_counters[em] = {}
if not period_counters.get(em).get(period):
period_counters[em][period] = Counter()
period_counters[em][period].update(body_counter)
    df = pd.DataFrame(columns=heem_body_part_labels, index=heem_emotion_labels)
"""
MLTrace: A machine learning progress tracker
====================================================
This module provides some basic functionality to track the process of machine learning model development.
It sets up a SQLite db-file and stores selected models, graphs, and data (for convenience) and recovers them
as requested.
``mltrace`` uses `peewee <http://docs.peewee-orm.com/en/latest/>`_ and `pandas <https://pandas.pydata.org/>`_ for
data manipulation.
It also has built-in capabilities to generate some typical machine learning plots and graphs.
"""
try:
from peewee import *
except ModuleNotFoundError:
Model = type("Model", (object,), dict(Simple=lambda: 0.0))
SqliteDatabase = lambda x: None
from datetime import datetime
MLTRACK_DB = SqliteDatabase(None)
class np2df(object):
"""
    A class to convert a numpy ndarray to a pandas DataFrame. It produces a callable object which returns a
    `pandas.DataFrame`.
    :param data: `numpy.ndarray` data
    :param clmns: a list of column names for the pandas DataFrame.
    If None, columns are named `C0`, `C1`, ..., following the column index of the ndarray.
"""
def __init__(self, data, clmns=None):
self.data = data
self.N = len(data[0])
if clmns is None:
self.Columns = ["C%d" % (_) for _ in range(self.N)]
else:
self.Columns = clmns
def __call__(self, *args, **kwargs):
from pandas import DataFrame
dct = {}
for idx in range(self.N):
dct[self.Columns[idx]] = list(self.data[:, idx])
return DataFrame(dct)
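# Illustrative usage sketch (not part of the original module):
#
#     import numpy as np
#     arr = np.array([[1.0, 2.0], [3.0, 4.0]])
#     df = np2df(arr, clmns=["height", "weight"])()   # call the object to get the DataFrame
#     # with clmns=None the columns would be auto-named C0 and C1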
class Task(Model):
"""
    The class to generate the `task` table in the SQLite db-file.
This table keeps basic information about the task on hand, e.g., the task name, a brief description,
target column, and columns to be ignored.
"""
try:
task_id = IntegerField(primary_key=True, unique=True, null=False, default=1)
name = CharField(null=True)
description = TextField(null=True)
target = CharField(null=True)
ignore = CharField(null=True)
init_date = DateTimeField(default=datetime.now, null=True)
last_mod_date = DateTimeField(default=datetime.now, null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class MLModel(Model):
"""
    The class to generate the `mlmodel` table in the SQLite db-file.
It stores the scikit-learn scheme of the model/pipeline, its parameters, etc.
"""
try:
model_id = IntegerField(primary_key=True, unique=True, null=False)
task_id = ForeignKeyField(Task)
name = CharField(null=True)
model_str = TextField(null=True)
model_type = CharField(null=True)
parameters = BareField(null=True)
date_modified = DateTimeField(default=datetime.now, null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class Metrics(Model):
"""
    The class to generate the `metrics` table in the SQLite db-file.
This table stores the calculated metrics of each stored model.
"""
try:
metrics_id = IntegerField(primary_key=True, unique=True, null=False)
model_id = ForeignKeyField(MLModel)
accuracy = FloatField(null=True)
auc = FloatField(null=True)
precision = FloatField(null=True)
recall = FloatField(null=True)
f1 = FloatField(null=True)
mcc = FloatField(null=True)
logloss = FloatField(null=True)
variance = FloatField(null=True)
max_error = FloatField(null=True)
mse = FloatField(null=True)
mae = FloatField(null=True)
r2 = FloatField(null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class Saved(Model):
"""
    The class to generate the `saved` table in the SQLite db-file.
It keeps the pickled version of a stored model that can be later recovered.
"""
try:
pickle_id = IntegerField(primary_key=True, unique=True, null=False)
model_id = ForeignKeyField(MLModel)
pickle = BareField(null=True)
init_date = DateTimeField(default=datetime.now, null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class Plots(Model):
"""
    The class to generate the `plots` table in the SQLite db-file.
This table stores `matplotlib` plots associated to each model.
"""
try:
plot_id = IntegerField(primary_key=True, unique=True, null=False)
model_id = ForeignKeyField(MLModel)
title = CharField(null=True)
plot = BareField(null=True)
init_date = DateTimeField(default=datetime.now, null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class Data(Model):
"""
    The class to generate the `data` table in the SQLite db-file.
This table stores the whole given data for convenience.
"""
class Meta:
database = MLTRACK_DB
class Weights(Model):
"""
    The class to generate the `weights` table in the SQLite db-file.
Stores some sensitivity measures, correlations, etc.
"""
class Meta:
database = MLTRACK_DB
class mltrack(object):
"""
    This class instantiates an object that tracks the ML activities and stores them upon request.
:param task: 'str' the task name
    :param task_id: the id of an existing task, used if the name is not provided.
:param db_name: a file name for the SQLite database
:param cv: the default cross validation method, must be a valid cv based on `sklearn.model_selection`;
default: `ShuffleSplit(n_splits=3, test_size=.25)`
"""
def __init__(self, task, task_id=None, db_name="mltrack.db", cv=None):
self.db_name = db_name
tables = [Task, MLModel, Metrics, Saved, Plots, Data, Weights]
for tbl in tables:
tbl._meta.database.init(self.db_name)
MLTRACK_DB.create_tables(tables)
res = Task.select().where((Task.name == task) | (Task.task_id == task_id))
if len(res) > 0:
self.task = res[0].name
self.task_id = res[0].task_id
self.target = res[0].target
else:
new_task = Task.create(name=task, description="Initiated automatically")
self.task_id = new_task.task_id
import sqlite3
self.conn = sqlite3.connect(self.db_name)
if cv is None:
from sklearn.model_selection import ShuffleSplit
self.cv = ShuffleSplit(n_splits=3, test_size=0.25)
else:
self.cv = cv
self.X, self.y = None, None
self.Updated, self.Loaded, self.Recovered = [], [], []
def UpdateTask(self, data):
"""
Updates the current task info.
:param data: a dictionary that may include some the followings as its keys:
+ 'name': the corresponding value will replace the current task name
+ 'description': the corresponding value will replace the current description
+ 'ignore': the corresponding value will replace the current ignored columns
:return: None
"""
task = Task.select().where(Task.task_id == self.task_id).get()
if "name" in data:
task.name = data["name"]
if "description" in data:
task.description = data["description"]
if "ignore" in data:
task.ignore = ",".join(data["ignore"])
task.last_mod_date = datetime.now()
task.save()
def UpdateModel(self, mdl, name):
"""
Updates an already logged model which has `mltrack_id` set.
:param mdl: a scikit-learn compatible estimator/pipeline
:param name: an arbitrary string to name the model
:return: None
"""
from pickle import dumps
if "mltrack_id" not in mdl.__dict__:
return
else:
mltrack_id = mdl.mltrack_id
model = MLModel.select().where(MLModel.model_id == mltrack_id).get()
model.name = name
model.model_str = str(mdl)
model.parameters = dumps(mdl.get_params())
model.date_modified = datetime.now()
model.save()
if mltrack_id not in self.Updated:
self.Updated.append(mltrack_id)
def LogModel(self, mdl, name=None):
"""
Log a machine learning model
:param mdl: a scikit-learn compatible estimator/pipeline
:param name: an arbitrary string to name the model
:return: modified instance of `mdl` which carries a new attribute `mltrack_id` as its id.
"""
from pickle import dumps
if name is not None:
mdl.mltrack_name = name
else:
            mdl.mltrack_name = str(mdl).split("(")[0]  # name is None in this branch
if "mltrack_id" not in mdl.__dict__:
MLModel.create(
task_id=self.task_id,
name=mdl.mltrack_name,
model_str=str(mdl),
model_type=str(type(mdl)).split("'")[1],
parameters=dumps(mdl.get_params()),
)
mdl.mltrack_id = (
MLModel.select(MLModel.model_id).order_by(MLModel.model_id.desc()).get()
)
else:
res = MLModel.select().where(MLModel.model_id == mdl.mltrack_id)[0]
res.name = mdl.mltrack_name
res.model_str = str(mdl)
res.parameters = dumps(mdl.get_params())
res.date_modified = datetime.now()
res.save()
# TBM
Tskres = Task.select().where(Task.task_id == self.task_id)[0]
Tskres.last_mod_date = datetime.now()
Tskres.save()
return mdl
def RegisterData(self, source_df, target):
"""
Registers a pandas DataFrame into the SQLite database.
Upon a call, it also sets `self.X` and `self.y` which are numpy arrays.
:param source_df: the pandas DataFrame to be stored
:param target: the name of the target column to be predicted
:return: None
"""
# TBM
res = Task.select().where(Task.task_id == self.task_id)[0]
res.target = target
res.last_mod_date = datetime.now()
res.save()
self.target = target
clmns = list(source_df.columns)
if target not in clmns:
raise BaseException("`%s` is not a part of data source." % target)
source_df.to_sql("data", self.conn, if_exists="replace", index=False)
clmns.remove(target)
self.X = source_df[clmns].values
self.y = source_df[target].values
def get_data(self):
"""
Retrieves data in numpy format
:return: numpy arrays X, y
"""
from pandas import read_sql
df = read_sql("SELECT * FROM data", self.conn)
clmns = list(df.columns)
clmns.remove(self.target)
self.X = df[clmns].values
self.y = df[self.target].values
return self.X, self.y
def get_dataframe(self):
"""
Retrieves data in pandas DataFrame format
:return: pandas DataFrame containing all data
"""
from pandas import read_sql
df = read_sql("SELECT * FROM data", self.conn)
return df
def LogMetrics(self, mdl, cv=None):
"""
        Logs metrics of an already logged model using a cross validation method
:param mdl: the model to be measured
:param cv: cross validation method
:return: a dictionary of all measures with their corresponding values for the model
"""
if cv is not None:
self.cv = cv
if self.X is None:
self.get_data()
if "mltrack_id" not in mdl.__dict__:
mdl = self.LogModel(mdl)
mdl_id = mdl.mltrack_id
mdl_type = mdl._estimator_type
#######################################################
prds = []
prbs = []
for train_idx, test_idx in self.cv.split(self.X, self.y):
X_train, y_train = self.X[train_idx], self.y[train_idx]
X_test, y_test = self.X[test_idx], self.y[test_idx]
mdl.fit(X_train, y_train)
prds.append((mdl.predict(X_test), y_test))
try:
prbs.append(mdl.predict_proba(X_test)[:, 1])
except AttributeError:
try:
prbs.append(mdl.decision_function(X_test))
except AttributeError:
pass
#######################################################
acc = None
f_1 = None
prs = None
rcl = None
aur = None
mcc = None
lgl = None
vrn = None
mxe = None
mse = None
mae = None
r2 = None
n_ = float(len(prbs))
if mdl_type == "classifier":
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score,
roc_curve,
auc,
log_loss,
matthews_corrcoef,
)
acc = sum([accuracy_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
f_1 = sum([f1_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
prs = sum([precision_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
rcl = sum([recall_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
mcc = sum([matthews_corrcoef(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
lgl = sum([log_loss(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
aur = 0.0
for i in range(int(n_)):
fpr, tpr, _ = roc_curve(prds[i][1], prbs[i])
aur += auc(fpr, tpr)
aur /= n_
elif mdl_type == "regressor":
from sklearn.metrics import (
explained_variance_score,
median_absolute_error,
mean_squared_error,
mean_absolute_error,
r2_score,
)
vrn = (
sum([explained_variance_score(y_tst, y_prd) for y_prd, y_tst in prds])
/ n_
)
mxe = (
sum([median_absolute_error(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
)
mse = sum([mean_squared_error(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
mae = sum([mean_absolute_error(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
r2 = sum([r2_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
Metrics.create(
model_id=mdl_id,
accuracy=acc,
auc=aur,
precision=prs,
f1=f_1,
recall=rcl,
mcc=mcc,
logloss=lgl,
variance=vrn,
max_error=mxe,
mse=mse,
mae=mae,
r2=r2,
)
# TBM
res = Task.select().where(Task.task_id == self.task_id)[0]
res.last_mod_date = datetime.now()
res.save()
return dict(
accuracy=acc,
auc=aur,
precision=prs,
f1=f_1,
recall=rcl,
mcc=mcc,
logloss=lgl,
variance=vrn,
max_error=mxe,
mse=mse,
mae=mae,
r2=r2,
)
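    # Illustrative sketch (not part of the original module): passing an explicit
    # cross-validation strategy instead of the default ShuffleSplit; `tracker` and
    # `clf` are the assumed objects from the module-level sketch above.
    #
    #     from sklearn.model_selection import StratifiedKFold
    #     scores = tracker.LogMetrics(clf, cv=StratifiedKFold(n_splits=5))
    #     print(scores["accuracy"], scores["auc"])    # regression-only entries stay None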
def LoadModel(self, mid):
"""
Loads a model corresponding to an id
:param mid: the model id
:return: an unfitted model
"""
from importlib import import_module
from pickle import loads
res = MLModel.select().where(MLModel.model_id == mid)
if len(res) == 0:
raise BaseException("No model with id '%d' were found" % (mid))
detail = res[0].model_type.split(".")
module_str = ".".join(detail[:-1])
clss = detail[-1]
module = import_module(module_str)
params = loads(res[0].parameters)
mdl = module.__getattribute__(clss)()
mdl.set_params(**params)
mdl.mltrack_id = mid
if mid not in self.Loaded:
self.Loaded.append(mid)
return mdl
@staticmethod
def getBest(metric):
"""
Finds the model with the best metric.
:param metric: the metric to find the best stored model for
        :return: the metrics record of the model with the best `metric`
"""
res = (
Metrics.select()
.order_by(Metrics.__dict__[metric].__dict__["field"].desc())
.dicts()
)
return res[0]
def allTasks(self):
"""
Lists all tasks as a pandas DataFrame
:return: a pandas DataFrame
"""
from pandas import read_sql
return read_sql("SELECT * FROM task", self.conn)
def allModels(self):
"""
Lists all logged models as a pandas DataFrame
:return: a pandas DataFrame
"""
from pandas import read_sql
return read_sql(
"SELECT model_id, task_id, name, model_str, model_type, date_modified FROM mlmodel WHERE task_id=%d"
% (self.task_id),
self.conn,
)
def allPreserved(self):
"""
Lists all pickled models as a pandas DataFrame
:return: a pandas DataFrame
"""
from pandas import read_sql
return read_sql("SELECT pickle_id, model_id, init_date FROM saved", self.conn)
def PreserveModel(self, mdl):
"""
Pickles and preserves an already logged model
:param mdl: a logged model
:return: None
"""
        import joblib  # sklearn.externals.joblib was removed in recent scikit-learn versions
if "mltrack_id" not in mdl.__dict__:
mdl = self.LogModel(mdl)
mdl_id = mdl.mltrack_id
file = open("track_ml_tmp_mdl.joblib", "wb")
joblib.dump(mdl, file)
file.close()
file = open("track_ml_tmp_mdl.joblib", "rb")
str_cntnt = file.read()
Saved.create(model_id=mdl_id, pickle=str_cntnt)
file.close()
import os
os.remove("track_ml_tmp_mdl.joblib")
def RecoverModel(self, mdl_id):
"""
Recovers a pickled model
:param mdl_id: a valid `mltrack_id`
:return: a fitted model
"""
        import joblib  # sklearn.externals.joblib was removed in recent scikit-learn versions
res = (
Saved.select()
.where(Saved.model_id == mdl_id)
.order_by(Saved.init_date.desc())
.dicts()
)
file = open("track_ml_tmp_mdl.joblib", "wb")
file.write(res[0]["pickle"])
file.close()
file = open("track_ml_tmp_mdl.joblib", "rb")
mdl = joblib.load(file)
file.close()
import os
os.remove("track_ml_tmp_mdl.joblib")
if mdl_id not in self.Recovered:
self.Recovered.append(mdl_id)
return mdl
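    # Illustrative sketch (not part of the original module): preserve a fitted model
    # and recover it later through its mltrack_id; `X_new` is an assumed feature matrix.
    #
    #     tracker.PreserveModel(clf)
    #     clf_again = tracker.RecoverModel(clf.mltrack_id)
    #     clf_again.predict(X_new)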
def allPlots(self, mdl_id):
"""
Lists all stored plots for a model with `mdl_id` as a pandas DataFrame
:param mdl_id: a valid `mltrack_id`
:return: a pandas DataFrame
"""
from pandas import read_sql
return read_sql(
"SELECT plot_id, model_id, title, init_date FROM plots WHERE model_id=%d"
% (mdl_id),
self.conn,
)
@staticmethod
def LoadPlot(pid):
"""
Loads a `matplotlib` plot
:param pid: the id of the plot
:return: a `matplotlib` figure
"""
from pickle import loads
# ax = plt.subplot(111)
res = Plots.select().where(Plots.plot_id == pid).dicts()
fig = loads(res[0]["plot"])
return fig
def plot_learning_curve(
self, mdl, title, ylim=None, cv=None, n_jobs=1, train_sizes=None, **kwargs
):
"""
Generate a simple plot of the test and training learning curve.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param title: string;
Title for the chart.
        :param measure: string, a performance measure; must be one of the following:
            `accuracy`, `f1`, `precision`, `recall`, `roc_auc`
        :param ylim: tuple, shape (ymin, ymax), optional;
            Defines minimum and maximum y-values plotted.
:param cv: int, cross-validation generator or an iterable, optional;
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
            :class:`StratifiedKFold` is used. If the mdl is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
:param n_jobs: integer, optional;
Number of jobs to run in parallel (default 1).
:return: a `matplotlib` plot
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import learning_curve
if cv is not None:
self.cv = cv
if self.X is None:
self.get_data()
if "mltrack_id" not in mdl.__dict__:
mdl = self.LogModel(mdl)
mdl_id = mdl.mltrack_id
meas = kwargs.get("measure", "accuracy")
if meas not in ["accuracy", "f1", "precision", "recall", "roc_auc"]:
meas = "accuracy"
if train_sizes is None:
train_sizes = np.linspace(0.1, 1.0, 5)
plt.subplot(111)
fig = plt.figure()
plt.title(title)
if ylim is None:
ylim = (-0.05, 1.05)
plt.ylim(*ylim)
plt.xlabel("Training size")
plt.ylabel("Score (%s)" % (meas))
train_sizes, train_scores, test_scores = learning_curve(
mdl,
self.X,
self.y,
cv=self.cv,
n_jobs=n_jobs,
train_sizes=train_sizes,
scoring=meas,
)
xlbls = np.array(
[str(round(_ * 100, 1)) + " %" for _ in train_sizes / len(self.y)]
)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(
xlbls,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.1,
color="r",
)
plt.fill_between(
xlbls,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color="g",
)
plt.plot(xlbls, train_scores_mean, "o-", color="r", label="Training score")
plt.plot(
xlbls, test_scores_mean, "o-", color="g", label="Cross-validation score"
)
plt.legend(loc="best")
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title=meas, plot=pckl)
return plt
def split_train(self, mdl):
from sklearn.model_selection import train_test_split
if "mltrack_id" not in mdl.__dict__:
mdl = self.LogModel(mdl)
mdl_id = mdl.mltrack_id
if self.X is None:
self.get_data()
X_train, X_test, y_train, y_test = train_test_split(
self.X, self.y, train_size=0.75
)
from sklearn.exceptions import NotFittedError
x_ = X_test[0]
try:
mdl.predict([x_])
except NotFittedError as _:
mdl.fit(X_train, y_train)
return mdl, mdl_id, X_train, X_test, y_train, y_test
def plot_calibration_curve(self, mdl, name, fig_index=1, bins=10):
"""
Plots calibration curves.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param name: string;
Title for the chart.
:param bins: number of bins to partition samples
:return: a `matplotlib` plot
"""
import matplotlib.pyplot as plt
from sklearn.calibration import calibration_curve
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
mdl, mdl_id, _, X_test, _, y_test = self.split_train(mdl)
if hasattr(mdl, "predict_proba"):
prob_pos = mdl.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = mdl.decision_function(X_test)
prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = calibration_curve(
y_test, prob_pos, n_bins=bins
)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-", label="%s" % (name))
ax2.hist(prob_pos, range=(0, 1), bins=bins, label=name, histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title("Calibration plots (reliability curve)")
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title="calibration", plot=pckl)
return plt
def plot_roc_curve(self, mdl, label=None):
"""
The ROC curve, modified from Hands-On Machine learning with Scikit-Learn.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param label: string;
label for the chart.
:return: a `matplotlib` plot
"""
import matplotlib.pyplot as plt
from numpy import arange
from sklearn.metrics import roc_curve
mdl, mdl_id, _, X_test, _, y_test = self.split_train(mdl)
_ = plt.subplot(111)
fig = plt.figure(figsize=(8, 8))
plt.title("ROC Curve")
try:
y_score = mdl.predict_proba(X_test)[:, 1]
except:
y_score_ = mdl.decision_function(X_test)
y_score = (y_score_ - y_score_.min()) / (y_score_.max() - y_score_.min())
fpr, tpr, _ = roc_curve(y_test, y_score)
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], "k--")
plt.axis([-0.005, 1, 0, 1.005])
plt.xticks(arange(0, 1, 0.05), rotation=90)
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate (Recall)")
plt.legend(loc="best")
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title="roc curve", plot=pckl)
return plt
def plot_cumulative_gain(
self,
mdl,
title="Cumulative Gains Curve",
figsize=None,
title_fontsize="large",
text_fontsize="medium",
):
"""
Generates the Cumulative Gains Plot from labels and scores/probabilities
The cumulative gains chart is used to determine the effectiveness of a
binary classifier. A detailed explanation can be found at
`http://mlwiki.org/index.php/Cumulative_Gain_Chart <http://mlwiki.org/index.php/Cumulative_Gain_Chart>`_.
The implementation here works only for binary classification.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param title: (string, optional): Title of the generated plot.
Defaults to "Cumulative Gains Curve".
:param figsize: (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6).
Defaults to ``None``.
:param title_fontsize: (string or int, optional): Matplotlib-style fontsizes.
Use e.g., "small", "medium", "large" or integer-values. Defaults to "large".
:param text_fontsize: (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium".
:return: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
from numpy import array, unique
import matplotlib.pyplot as plt
mdl, mdl_id, _, X_test, _, y_test = self.split_train(mdl)
y_true = array(y_test)
try:
y_probas = mdl.predict_proba(X_test)
y_probas = array(y_probas)
prob_pos0 = y_probas[:, 0]
prob_pos1 = y_probas[:, 1]
except:
prob_pos = mdl.decision_function(X_test)
prob_pos1 = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
prob_pos0 = (prob_pos.max() - prob_pos) / (prob_pos.max() - prob_pos.min())
classes = unique(y_true)
if len(classes) != 2:
raise ValueError(
"Cannot calculate Cumulative Gains for data with "
"{} category/ies".format(len(classes))
)
# Compute Cumulative Gain Curves
percentages, gains1 = self.cumulative_gain_curve(y_true, prob_pos0, classes[0])
percentages, gains2 = self.cumulative_gain_curve(y_true, prob_pos1, classes[1])
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(percentages, gains1, lw=3, label="Class {}".format(classes[0]))
ax.plot(percentages, gains2, lw=3, label="Class {}".format(classes[1]))
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.plot([0, 1], [0, 1], "k--", lw=2, label="Baseline")
ax.set_xlabel("Percentage of sample", fontsize=text_fontsize)
ax.set_ylabel("Gain", fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.grid(True)
ax.legend(loc="lower right", fontsize=text_fontsize)
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title="cumulative gain", plot=pckl)
return ax
@staticmethod
def cumulative_gain_curve(y_true, y_score, pos_label=None):
"""
This function generates the points necessary to plot the Cumulative Gain
Note: This implementation is restricted to the binary classification task.
:param y_true: (array-like, shape (n_samples)): True labels of the data.
:param y_score: (array-like, shape (n_samples)): Target scores, can either be probability estimates of
the positive class, confidence values, or non-thresholded measure of decisions (as returned by
decision_function on some classifiers).
:param pos_label: (int or str, default=None): Label considered as positive and others are considered negative
:return:
percentages (numpy.ndarray): An array containing the X-axis values for plotting the Cumulative Gains chart.
gains (numpy.ndarray): An array containing the Y-axis values for one curve of the Cumulative Gains chart.
:raise:
ValueError: If `y_true` is not composed of 2 classes. The Cumulative Gain Chart is only relevant in
binary classification.
"""
from numpy import asarray, array_equal, cumsum, arange, insert, unique, argsort
y_true, y_score = asarray(y_true), asarray(y_score)
# ensure binary classification if pos_label is not specified
classes = unique(y_true)
if pos_label is None and not (
array_equal(classes, [0, 1])
or array_equal(classes, [-1, 1])
or array_equal(classes, [0])
or array_equal(classes, [-1])
or array_equal(classes, [1])
):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.0
# make y_true a boolean vector
y_true = y_true == pos_label
sorted_indices = argsort(y_score)[::-1]
y_true = y_true[sorted_indices]
gains = cumsum(y_true)
percentages = arange(start=1, stop=len(y_true) + 1)
gains = gains / float(sum(y_true))
percentages = percentages / float(len(y_true))
gains = insert(gains, 0, [0])
percentages = insert(percentages, 0, [0])
return percentages, gains
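    # Worked example (illustrative, not part of the original module): for
    # y_true = [1, 0, 1, 1] and y_score = [0.9, 0.8, 0.4, 0.3] with pos_label=1,
    # sorting by descending score leaves y_true as [1, 0, 1, 1]; cumulative positives
    # are [1, 1, 2, 3], so gains = [0, 1/3, 1/3, 2/3, 1] and
    # percentages = [0, 0.25, 0.5, 0.75, 1.0] after the leading zeros are inserted.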
def plot_lift_curve(
self,
mdl,
title="Lift Curve",
figsize=None,
title_fontsize="large",
text_fontsize="medium",
):
"""
Generates the Lift Curve from labels and scores/probabilities The lift curve is used to
determine the effectiveness of a binary classifier. A detailed explanation can be found at
`http://www2.cs.uregina.ca/~dbd/cs831/notes/lift_chart/lift_chart.html <http://www2.cs.uregina.ca/~dbd/cs831/notes/lift_chart/lift_chart.html>`_.
The implementation here works only for binary classification.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param title: (string, optional): Title of the generated plot. Defaults to "Lift Curve".
:param figsize: (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``.
:param title_fontsize: (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium",
"large" or integer-values. Defaults to "large".
:param text_fontsize: (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium",
"large" or integer-values. Defaults to "medium".
:return: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
import matplotlib.pyplot as plt
from numpy import array, unique
mdl, mdl_id, _, X_test, _, y_test = self.split_train(mdl)
y_true = array(y_test)
try:
y_probas = mdl.predict_proba(X_test)
y_probas = array(y_probas)
prob_pos0 = y_probas[:, 0]
prob_pos1 = y_probas[:, 1]
except:
prob_pos = mdl.decision_function(X_test)
prob_pos1 = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
prob_pos0 = (prob_pos.max() - prob_pos) / (prob_pos.max() - prob_pos.min())
classes = unique(y_true)
if len(classes) != 2:
raise ValueError(
"Cannot calculate Lift Curve for data with "
"{} category/ies".format(len(classes))
)
# Compute Cumulative Gain Curves
percentages, gains1 = self.cumulative_gain_curve(y_true, prob_pos0, classes[0])
percentages, gains2 = self.cumulative_gain_curve(y_true, prob_pos1, classes[1])
percentages = percentages[1:]
gains1 = gains1[1:]
gains2 = gains2[1:]
gains1 = gains1 / percentages
gains2 = gains2 / percentages
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(percentages, gains1, lw=3, label="Class {}".format(classes[0]))
ax.plot(percentages, gains2, lw=3, label="Class {}".format(classes[1]))
ax.plot([0, 1], [1, 1], "k--", lw=2, label="Baseline")
ax.set_xlabel("Percentage of sample", fontsize=text_fontsize)
ax.set_ylabel("Lift", fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.grid(True)
ax.legend(loc="lower right", fontsize=text_fontsize)
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title="lift curve", plot=pckl)
return ax
def heatmap(
self,
corr_df=None,
sort_by=None,
ascending=False,
font_size=3,
cmap="gnuplot2",
idx_col="feature",
ignore=(),
):
"""
Plots a heatmap from the values of the dataframe `corr_df`
:param corr_df: value container
:param idx_col: the column whose values will be used as index
:param sort_by: dataframe will be sorted descending by values of this column.
If None, the first column is used
        :param font_size: font size, default 3
:param cmap: color mapping. Must be one of the followings
'viridis', 'plasma', 'inferno', 'magma', 'cividis', 'Greys', 'Purples',
'Blues', 'Greens', 'Oranges', 'Reds', 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd',
'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn',
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink', 'spring',
'summer', 'autumn', 'winter', 'cool', 'Wistia', 'hot', 'afmhot',
'gist_heat', 'copper', 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic', 'twilight',
'twilight_shifted', 'hsv', 'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3', 'tab10', 'tab20', 'tab20b', 'tab20c',
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern', 'gnuplot',
'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'gist_rainbow', 'rainbow',
'jet', 'nipy_spectral', 'gist_ncar'
:return: matplotlib pyplot instance
"""
import matplotlib.pyplot as plt
from numpy import arange, amin, amax
from pandas import read_sql
ax = plt.gca()
idx_col = idx_col
if corr_df is None:
df = read_sql("SELECT * FROM weights", self.conn)
clmns = list(df.columns)
df = df.sort_values(
by=clmns[0] if sort_by is None else sort_by, ascending=ascending
)
if idx_col is None:
idx_col = clmns[0]
clmns.remove(idx_col)
else:
df = corr_df
clmns = list(df.columns)
# df = df.sort_values(by=clmns[0] if sort_by is None else sort_by, ascending=ascending)
if idx_col is not None:
# idx_col = clmns[0]
clmns.remove(idx_col)
for itm in ignore:
clmns.remove(itm)
data = df[clmns].values
mn, mx = amin(data), amax(data)
im = ax.imshow(data, cmap=cmap, interpolation="bilinear")
# ax.set_adjustable(adjustable='box', share=False)
ax.autoscale(False)
cbar_kw = {
"fraction": 0.2,
"ticks": [mn, 0.0, (mn + mx) / 2.0, mx],
"drawedges": False,
}
cbar = ax.figure.colorbar(im, ax=ax, aspect=max(20, len(df)), **cbar_kw)
cbarlabel = ""
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
cbar.ax.tick_params(labelsize=font_size + 1)
ax.set_xticks(arange(data.shape[1]))
ax.set_yticks(arange(data.shape[0]))
ax.set_xticklabels(clmns, fontdict={"fontsize": font_size})
if idx_col is not None:
ax.set_yticklabels(list(df[idx_col]), fontdict={"fontsize": font_size})
else:
ax.set_yticklabels(list(df.index), fontdict={"fontsize": font_size})
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(
ax.get_xticklabels(),
rotation=-305,
ha="left",
va="top",
rotation_mode="anchor",
)
# Turn spines off and create white grid.
for _, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(arange(data.shape[1] + 1) - 0.5, minor=True)
ax.set_yticks(arange(data.shape[0] + 1) - 0.5, minor=True)
ax.grid(which="minor", color="w", linestyle="-", linewidth=0)
ax.tick_params(which="minor", bottom=False, left=False)
return plt
def FeatureWeights(self, weights=("pearson", "variance"), **kwargs):
"""
Calculates the requested weights and log them
:param weights: a list of weights, a subset of {'pearson', 'variance', 'relieff',
'surf', 'sobol', 'morris', 'delta_mmnt', 'info-gain'}
:param kwargs: all input acceptable by ``skrebate.ReliefF``, ``skrebate.surf``,
``sensapprx.SensAprx``
:return: None
"""
from pandas import DataFrame, read_sql
self.data = read_sql("SELECT * FROM data", self.conn)
features = list(self.data.columns)
features.remove(self.target)
weights_df = read_sql("SELECT * FROM weights", self.conn)
if len(weights_df) == 0:
weights_df = DataFrame({"feature": features})
X = self.data[features].values
y = self.data[self.target].values
n_features = kwargs.get("n_features", int(len(features) / 2))
domain = None
probs = None
regressor = kwargs.get("regressor", None)
reduce = kwargs.get("reduce", True)
num_smpl = kwargs.get("num_smpl", 700)
W = {"feature": features}
for factor in weights:
if factor == "pearson":
Res = dict(self.data.corr(method="pearson").fillna(0)[self.target])
W["pearson"] = [Res[v] for v in features]
elif factor == "variance":
Res = dict(self.data.var())
W["variance"] = [Res[v] for v in features]
elif factor == "relieff":
from skrebate import ReliefF
n_neighbors = kwargs.get("n_neighbors", 80)
RF = ReliefF(n_features_to_select=n_features, n_neighbors=n_neighbors)
RF.fit(X, y)
W["relieff"] = [
RF.feature_importances_[features.index(v)] for v in features
]
elif factor == "surf":
from skrebate import SURF
RF = SURF(n_features_to_select=n_features)
RF.fit(X, y)
W["surf"] = [
RF.feature_importances_[features.index(v)] for v in features
]
elif factor == "sobol":
from .sensapprx import SensAprx
SF = SensAprx(
method="sobol",
domain=domain,
probs=probs,
regressor=regressor,
reduce=reduce,
num_smpl=num_smpl,
)
SF.fit(X, y)
domain = SF.domain
probs = SF.probs
W["sobol"] = [SF.weights_[features.index(v)] for v in features]
elif factor == "morris":
from .sensapprx import SensAprx
SF = SensAprx(
method="morris",
domain=domain,
probs=probs,
regressor=regressor,
reduce=reduce,
num_smpl=num_smpl,
)
SF.fit(X, y)
domain = SF.domain
probs = SF.probs
W["morris"] = [SF.weights_[features.index(v)] for v in features]
elif factor == "delta-mmnt":
from .sensapprx import SensAprx
SF = SensAprx(
method="delta-mmnt",
domain=domain,
probs=probs,
regressor=regressor,
reduce=reduce,
num_smpl=num_smpl,
)
SF.fit(X, y)
domain = SF.domain
probs = SF.probs
W["delta_mmnt"] = [SF.weights_[features.index(v)] for v in features]
elif factor == "info-gain":
from sklearn.feature_selection import mutual_info_classif
Res = mutual_info_classif(X, y, discrete_features=True)
W["info_gain"] = [Res[features.index(v)] for v in features]
new_w_df = DataFrame(W)
merged = weights_df.merge(new_w_df, on="feature")
merged.fillna(0.0)
merged.to_sql("weights", self.conn, if_exists="replace", index=False)
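    # Illustrative sketch (not part of the original module): compute a few weight
    # columns, plot them from the logged 'weights' table and list the top features;
    # `tracker` is the assumed object from the module-level sketch above.
    #
    #     tracker.FeatureWeights(weights=("pearson", "variance", "info-gain"))
    #     tracker.heatmap(sort_by="pearson").show()
    #     print(tracker.TopFeatures(num=5))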
def TopFeatures(self, num=10):
"""
Returns `num` of top features in the data based on calculated weights
:param num: number of top features to return
:return: an OrderedDict of top features
"""
from pandas import read_sql
from collections import OrderedDict
        weights_df = read_sql("SELECT * FROM weights", self.conn)
        # The original body is truncated here; the lines below are a minimal completion
        # sketch that assumes features are ranked by the mean absolute value of all
        # logged weight columns.
        weight_clmns = [c for c in weights_df.columns if c != "feature"]
        weights_df["rank_score"] = weights_df[weight_clmns].abs().mean(axis=1)
        weights_df = weights_df.sort_values(by="rank_score", ascending=False).head(num)
        return OrderedDict(zip(weights_df["feature"], weights_df["rank_score"]))
import pandas as pd
import os
import numpy as np
import gc
import copy
import datetime
import warnings
from tqdm import tqdm
from scipy import sparse
from numpy import array
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfTransformer
###############################################
######### Load data
###############################################
user_app = pd.read_csv('../../data/processed_data/user_app.csv', dtype={'uId':np.int32, 'appId':str})
app_info = pd.read_csv('../../data/processed_data/app_info.csv', dtype={'appId':str, 'category':int})
###############################################
########## Memory compression helper
###############################################
# Downcast column dtypes to save memory
from tqdm import tqdm_notebook
class _Data_Preprocess:
def __init__(self):
self.int8_max = np.iinfo(np.int8).max
self.int8_min = np.iinfo(np.int8).min
self.int16_max = np.iinfo(np.int16).max
self.int16_min = np.iinfo(np.int16).min
self.int32_max = np.iinfo(np.int32).max
self.int32_min = np.iinfo(np.int32).min
self.int64_max = np.iinfo(np.int64).max
self.int64_min = np.iinfo(np.int64).min
self.float16_max = np.finfo(np.float16).max
self.float16_min = np.finfo(np.float16).min
self.float32_max = np.finfo(np.float32).max
self.float32_min = np.finfo(np.float32).min
self.float64_max = np.finfo(np.float64).max
self.float64_min = np.finfo(np.float64).min
'''
function: _get_type(self,min_val, max_val, types)
get the correct types that our columns can trans to
'''
def _get_type(self, min_val, max_val, types):
if types == 'int':
if max_val <= self.int8_max and min_val >= self.int8_min:
return np.int8
            elif max_val <= self.int16_max and min_val >= self.int16_min:
return np.int16
elif max_val <= self.int32_max and min_val >= self.int32_min:
return np.int32
return None
elif types == 'float':
if max_val <= self.float16_max and min_val >= self.float16_min:
return np.float16
if max_val <= self.float32_max and min_val >= self.float32_min:
return np.float32
if max_val <= self.float64_max and min_val >= self.float64_min:
return np.float64
return None
'''
function: _memory_process(self,df)
column data types trans, to save more memory
'''
def _memory_process(self, df):
init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('Original data occupies {} GB memory.'.format(init_memory))
df_cols = df.columns
for col in tqdm_notebook(df_cols):
try:
if 'float' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'float')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
elif 'int' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'int')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
except:
                print('Could not downcast column {}.'.format(col))
afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
return df
memory_preprocess = _Data_Preprocess()
# 用法:
# baseSet=memory_preprocess._memory_process(baseSet)
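# Illustrative sketch (not part of the original script): downcasting a small
# hypothetical frame before the heavy merges below; here uId fits int8 and the
# ratio column fits float16.
#
#     demo = pd.DataFrame({'uId': np.arange(5, dtype=np.int64),
#                          'ratio': np.random.rand(5).astype(np.float64)})
#     demo = memory_preprocess._memory_process(demo)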
###############################################
########## Count and share of apps installed per user
###############################################
app_counts = user_app[['appId']].drop_duplicates().count()
userSub = user_app.groupby('uId')['appId'].nunique().reset_index().rename(columns={'appId': 'user_app_active_counts'})
userSub['user_app_active_ratio'] = userSub['user_app_active_counts'].apply(lambda x: x/app_counts)
del app_counts
user_app_active_counts = userSub.copy()
###############################################
######## Apps installed by users in each age group
###############################################
age_train = pd.read_csv('../../data/processed_data/age_train.csv',dtype={'uId':np.int32, 'age_group':np.int8})
userSub = pd.merge(age_train, user_app, how='left', on='uId')
userSub=pd.pivot_table(userSub, values='uId', index=['appId'],columns=['age_group'],aggfunc='count', fill_value=0)
userSub['sum']=userSub.sum(axis=1)
userSub= userSub.reset_index()
userSub.rename(columns={1:'age_1',2:'age_2',3:'age_3',4:'age_4',5:'age_5',6:'age_6'},inplace=True)
userSub.drop(axis=0, index=0, inplace=True)
userSub['age1_%']= userSub.apply(lambda x: round(x['age_1']/x['sum'],2),axis=1)
userSub['age2_%']= userSub.apply(lambda x: round(x['age_2']/x['sum'],2),axis=1)
userSub['age3_%']= userSub.apply(lambda x: round(x['age_3']/x['sum'],2),axis=1)
userSub['age4_%']= userSub.apply(lambda x: round(x['age_4']/x['sum'],2),axis=1)
userSub['age5_%']= userSub.apply(lambda x: round(x['age_5']/x['sum'],2),axis=1)
userSub['age6_%']= userSub.apply(lambda x: round(x['age_6']/x['sum'],2),axis=1)
age1 = userSub[(userSub['age1_%'] >= 0.3)][['appId']].copy()
age1['age_num1'] = 1
age2 = userSub[(userSub['age2_%'] >= 0.6)][['appId']].copy()
age2['age_num2'] = 1
age3 = userSub[(userSub['age3_%'] >= 0.6)][['appId']].copy()
age3['age_num3'] = 1
age4 = userSub[(userSub['age4_%'] >= 0.6)][['appId']].copy()
age4['age_num4'] = 1
age5 = userSub[(userSub['age5_%'] >= 0.3)][['appId']].copy()
age5['age_num5'] = 1
age6 = userSub[(userSub['age6_%'] >= 0.3)][['appId']].copy()
age6['age_num6'] = 1
userSub = pd.merge(user_app, age1, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age2, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age3, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age4, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age5, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age6, how='left', on='appId').fillna(0)
userSub = userSub.groupby('uId').sum().reset_index()
user_active_app_age = userSub.copy()
###############################################
########## Number of apps per category installed by each user
###############################################
userSub = pd.merge(user_app, app_info, how='left', on='appId').fillna(method='pad')
userSub = pd.pivot_table(userSub, values='appId', index=['uId'],columns=['category'], aggfunc='count', fill_value=0).reset_index()
userSub['use_app_cate_nums']=0
for i in range(25):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(26,30):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(34,36):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(25):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
for i in range(26,30):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
for i in range(34,36):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
user_active_category_counts = userSub.copy()
###############################################
########## How many app categories each user has installed
###############################################
userSub = pd.merge(user_app, app_info, how='left', on='appId').fillna(method='pad')
userSub = userSub[['uId', 'category']].groupby('uId')['category'].nunique().reset_index()
userSub.rename(columns={'category': 'active_cate_nums'}, inplace=True)
user_active_cate_nums = userSub.copy()
###############################################
########## Compute the target-customer age index for each app
###############################################
age_train = pd.read_csv('../../data/processed_data/age_train.csv',dtype={'uId':np.int32, 'age_group':np.int8})
userSub = pd.merge(age_train, user_app, how='left', on='uId')
userSub=pd.pivot_table(userSub, values='uId', index=['appId'],columns=['age_group'],
aggfunc='count', fill_value=0)
userSub['sum']=userSub.sum(axis=1)
userSub= userSub.reset_index()
userSub.rename(columns={1:'age_1',2:'age_2',3:'age_3',4:'age_4',5:'age_5',6:'age_6'},inplace=True)
userSub.drop(axis=0, index=0, inplace=True)
userSub['age1_%']= userSub.apply(lambda x: round(x['age_1']/x['sum'],2),axis=1)
userSub['age2_%']= userSub.apply(lambda x: round(x['age_2']/x['sum'],2),axis=1)
userSub['age3_%']= userSub.apply(lambda x: round(x['age_3']/x['sum'],2),axis=1)
userSub['age4_%']= userSub.apply(lambda x: round(x['age_4']/x['sum'],2),axis=1)
userSub['age5_%']= userSub.apply(lambda x: round(x['age_5']/x['sum'],2),axis=1)
userSub['age6_%']= userSub.apply(lambda x: round(x['age_6']/x['sum'],2),axis=1)
# Compute each app's target-customer age index (method: sum over age groups N of (app's install share in group N * group value N * 10 / that group's share of the sample))
userSub['age_weight']=userSub.apply(lambda x:(10*x['age1_%']/0.03 +20*x['age2_%']/0.2 +30*x['age3_%']/0.3 +40*x['age4_%']/0.25 +50*x['age5_%']/0.15 +60*x['age6_%']/0.075) ,axis=1)
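# Worked example (hypothetical app): if its installs split across the six age groups as
# 3% / 20% / 30% / 25% / 15% / 7%, the weight is
# 10*0.03/0.03 + 20*0.2/0.2 + 30*0.3/0.3 + 40*0.25/0.25 + 50*0.15/0.15 + 60*0.07/0.075
# = 10 + 20 + 30 + 40 + 50 + 56 = 206, i.e. apps skewed towards older groups score higher.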
userSub=userSub[['appId','age_weight']]
userSub=pd.merge(user_app,userSub,how='left',on='appId')
userSub=userSub.groupby('uId')['age_weight'].agg(['mean','min','max','median','std','var']).reset_index()
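# NOTE: despite its name, the next line squares the mean (** 2); use ** 0.5 instead if an
# actual square root of the mean is intended.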
userSub['sqrt_of_mean']=userSub['mean']**2
userSub=round(userSub)
feature_activated_app_age_weight = userSub.copy()
###############################################
########## Compute each app's probability within each age group
###############################################
age_train = pd.read_csv('../../data/processed_data/age_train.csv',dtype={'uId':np.int32, 'age_group':np.int8})
userSub = pd.merge(age_train, user_app, how='inner', on='uId')
userSub= | pd.pivot_table(userSub, values='uId', index=['appId'],columns=['age_group'],aggfunc='count', fill_value=0) | pandas.pivot_table |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.utils import shuffle
from sklearn import metrics
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
###############################################################################################
def preprocess_data(train,test):
id_test=test['patient_id']
train=train.drop(['patient_id'],axis=1)
test=test.drop(['patient_id' ],axis=1)
y=train['is_screener']
train=train.drop(['is_screener'],axis=1)
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
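# Example usage (sketch; assumes train/test dataframes with the columns referenced above):
#   id_test, test, train, y = preprocess_data(train, test)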
os.chdir(os.getcwd())
train_file = '../features/train_big_table.csv.gz'
test_file = '../features/test_big_table.csv.gz'
train = | pd.read_csv(train_file,low_memory=False) | pandas.read_csv |
import os
import sys
import pickle
import pandas as pd
import numpy as np
import sys
from sklearn.feature_selection import chi2, SelectKBest, f_regression
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.manifold import Isomap, LocallyLinearEmbedding
import settings as project_settings
target_data_folder = project_settings.target_data_folder
features_data_folder = project_settings.features_data_folder
class_folder = project_settings.class_folder
result_folder = project_settings.result_folder
algorithm_len = project_settings.algorithm_len
sys.path.append(class_folder)
from decision_tree_singletarget import DecisionTree_Single
from decision_tree_multitarget import DecisionTree_Multi
from random_forest_singletarget import RandomForestRegressor_Single
from random_forest_multitarget import RandomForestRegressor_Multi
from dnn_model import DNN
from dnn_single_model import DNN_Single
labels =pd.read_csv(f"{target_data_folder}labels.txt",sep=';',index_col=False)
sys_min = sys.float_info.min
def create_data(df,df_perf,labels):
df2 = df_perf.assign(label = labels['x'])
df2 = df2.rename(columns={'1' : 'Precision'})
data = df.join(df2.set_index('label'))
return data
def clean_dataset(df):
assert isinstance(df, pd.DataFrame), "df needs to be a pd.DataFrame"
df.dropna(inplace=True)
indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(1)
df = df[df.replace([-np.inf], sys.float_info.min).notnull().all(axis=1)]
return df[indices_to_keep].astype(np.float32)
def get_data_for_algorith(algorithm, no_fold):
df_perf = pd.read_csv(f"{target_data_folder}performance_0_I{algorithm}.txt",sep='\t')
df_train = pd.read_csv(f"{features_data_folder}train_{no_fold}_fused.csv",sep='\t', index_col=0)
df_test = pd.read_csv(f"{features_data_folder}test_{no_fold}_fused.csv",sep='\t', index_col=0)
df_perf = df_perf.iloc[:,2:3]
rez_test = create_data(df_test,df_perf,labels)
rez_train = create_data(df_train,df_perf,labels)
return rez_train, rez_test
# Check valid data
def valid_data(df):
if len(df[df.isin([np.nan, np.inf, -np.inf]).any(1)]) == 0:
return True
else:
return False
def add_log_performance(df):
df['log_Precision'] = np.log10(df.iloc[:, -1] + 1)
return df
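# Example: a raw Precision of 9 becomes log_Precision = log10(9 + 1) = 1.0;
# the +1 offset keeps a precision of zero defined (log10(0 + 1) = 0).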
def get_features(X_train, y_train):
selected_features = []
for i in range(0, len(y_train.columns)):
selector = SelectKBest(f_regression, k=10)
selector.fit(X_train, y_train.iloc[:,i])
#selected_features.append(list(selector.scores_))
cols = selector.get_support(indices=True)
selected_features.append(cols)
features = set()
for array in selected_features:
for feature in array:
features.add(feature)
return list(features)
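# Example usage (sketch): with a multi-target y_train, this returns the union of the
# top-10 f_regression features selected for each target column, e.g.
#   cols = get_features(X_train, y_train)
#   X_train_reduced = X_train.iloc[:, cols]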
def get_data(algorithm_no, fold):
if not isinstance(algorithm_no, list) and not isinstance(algorithm_no, tuple):
train_df, test_df = get_data_for_algorith(algorithm_no, fold)
train_df_with_log = add_log_performance(train_df)
test_df_with_log = add_log_performance(test_df)
train_df_with_log_clean = clean_dataset(train_df_with_log)
test_df_with_log_clean = clean_dataset(test_df_with_log)
if valid_data(train_df_with_log_clean) and valid_data(test_df_with_log_clean):
X_train = train_df_with_log_clean.iloc[:, :-2]
y_train_labels = train_df_with_log_clean.iloc[:, -2:]
X_test = test_df_with_log_clean.iloc[:, :-2]
y_test_labels = test_df_with_log_clean.iloc[:, -2:]
return X_train, y_train_labels, X_test, y_test_labels
else:
raise Exception("Invalid Data")
else:
data_train, data_test = [], []
for alg in algorithm_no:
d_train, d_test = get_data_for_algorith(alg, fold)
data_train.append(d_train)
data_test.append(d_test)
merged_good_train = data_train[0]
merged_good_test = data_test[0]
for i in range(1, len(algorithm_no)):
merged_train = pd.merge(merged_good_train, data_train[i], how='inner', left_index=True, right_index=True,
suffixes=(f'_x{i}', f'_y{i}'))
merged_test = pd.merge(merged_good_test, data_test[i], how='inner', left_index=True, right_index=True,
suffixes=(f'_x{i}', f'_y{i}'))
merged_good_train = merged_train[list(merged_train.columns[0:(100+i-1)]) + [merged_train.columns[-1]]]
merged_good_test = merged_test[list(merged_test.columns[0:(100+i-1)]) + [merged_test.columns[-1]]]
# Fix the column names
index = pd.Index(list(data_train[0].columns[:99]) + [f'Precision_alg{alg}' for alg in algorithm_no])
merged_good_train.columns = index
merged_good_test.columns = index
X_train = merged_good_train.iloc[:, :-len(algorithm_no)]
y_train_labels = merged_good_train.iloc[:, -len(algorithm_no):]
X_test = merged_good_test.iloc[:, :-len(algorithm_no)]
y_test_labels = merged_good_test.iloc[:, -len(algorithm_no):]
        return X_train, y_train_labels, X_test, y_test_labels
def get_model(X_train, y_train, X_test, y_test, model_name, model_kwargs=None, single_output=True, target=None):
if model_kwargs is None:
model_kwargs = {}
if single_output:
if model_name == 'Xgboost':
return Xgboost_Single(X_train, y_train, X_test, y_test, model_kwargs, target)
elif model_name == 'nn':
return DNN_Single(X_train, y_train, X_test, y_test, target)
elif model_name == 'decision_tree':
return DecisionTree_Single(X_train, y_train, X_test, y_test, model_kwargs, target)
elif model_name == 'linear_regression':
return LinearRegression_Single(X_train, y_train, X_test, y_test, model_kwargs, target)
elif model_name == 'k_neighbors':
return KNeighborsRegressor_Single(X_train, y_train, X_test, y_test, model_kwargs, target)
elif model_name == 'random_forest':
return RandomForestRegressor_Single(X_train, y_train, X_test, y_test, model_kwargs, target)
else:
pass
else:
if model_name == 'Xgboost':
return Xgboost_Multi(X_train, y_train, X_test, model_kwargs, y_test)
elif model_name == 'nn':
return DNN(X_train, y_train, X_test, y_test)
elif model_name == 'decision_tree':
return DecisionTree_Multi(X_train, y_train, X_test, y_test, model_kwargs)
elif model_name == 'linear_regression':
return LinearRegression_Multi(X_train, y_train, X_test, y_test, model_kwargs)
elif model_name == 'k_neighbors':
return KNeighborsRegressor_Multi(X_train, y_train, X_test, y_test, model_kwargs)
elif model_name == 'random_forest':
return RandomForestRegressor_Multi(X_train, y_train, X_test, y_test, model_kwargs)
else:
pass
def dimension_reduction(x, n, dim_reduction_alg):
alg = dim_reduction_alg.lower()
alg_dict = {
'pca': PCA,
'svd': TruncatedSVD,
'isomap': Isomap,
'lle': LocallyLinearEmbedding
}
model = alg_dict[alg]
return model(n_components=n).fit_transform(x)
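# Example usage (sketch): reduce a feature matrix to 10 components with PCA:
#   X_reduced = dimension_reduction(X_train, 10, 'pca')
# 'svd', 'isomap' and 'lle' are accepted the same way via the alg_dict lookup.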
def run_50_folds_on_algorithm_multi_target(algorithm_no, feature_selection = False, model_name='Xgboost', model_kwargs=None, dim_reduction_n=None, dim_reduction_alg='None', fold_range=None):
model_name_folder = model_name
if dim_reduction_alg and dim_reduction_n:
model_name_folder += f'_{dim_reduction_alg}_n_{dim_reduction_n}'
if model_kwargs:
model_name_folder += '_' + '_'.join(f'{k}={v}'for k, v in model_kwargs.items())
model_kwargs_str = '_'.join(f'{k}={v}'for k, v in model_kwargs.items())
if not isinstance(algorithm_no, list) and not isinstance(algorithm_no, tuple):
labels = ['Precision', 'log_Precision']
algorithm_name = str(algorithm_no)
else:
labels = [f'Precision_alg{i}' for i in algorithm_no]
algorithm_name = '_'.join(map(str, algorithm_no))
predictions_folder = f"{result_folder}predictions/predictions_alg_no_{algorithm_name}/multi_target_output/{model_name_folder}/"
mae_folder = f"{result_folder}mae/multi_target_output/{model_name_folder}/"
models_folder = f'{result_folder}models/multi_target_output/{model_name_folder}/'
os.makedirs(predictions_folder, exist_ok=True)
os.makedirs(mae_folder, exist_ok=True)
os.makedirs(models_folder, exist_ok=True)
output_mae = []
if fold_range is None:
fold_range = range(0, 50)
for i in fold_range:
# try:
X_train, y_train, X_test, y_test = get_data(algorithm_no, i)
if feature_selection:
features = get_features(X_train, y_train)
X_train = X_train.iloc[:, features]
X_test = X_test.iloc[:, features]
if dim_reduction_alg and dim_reduction_n:
X_train = dimension_reduction(X_train, dim_reduction_n, dim_reduction_alg)
X_test = dimension_reduction(X_test, dim_reduction_n, dim_reduction_alg)
model = get_model(X_train, y_train, X_test, y_test, model_name, model_kwargs, False, None)
model.train_model()
print(f"PRINTING RESULTS FOR: fold number{i+1} and algorithm {algorithm_name}\n")
print("Testing score: \n")
y_pred = model.get_predictions()
precision_mae = model.get_mae_precision()
print(f"Precision MAE: {precision_mae:.4f}")
log_precision_mae = model.get_mae_log_precision()
print(f"Log Precision MAE: {log_precision_mae:.4f}")
fold_algorithm_mae = [i, algorithm_name, precision_mae, log_precision_mae]
output_mae.append(fold_algorithm_mae)
real_pred_df = model.get_df_with_predictions_for_csv(algorithm_name, i, labels)
real_pred_df.to_csv(f"{predictions_folder}predictions_fold_no_{i}_alg_no_{algorithm_name}.csv")
model_name_location = f'{models_folder}model_fold_no_{i}_alg_no_{algorithm_name}.pkl'
if model_name == 'nn':
model_name_location = f'{models_folder}model_fold_no_{i}_alg_no_{algorithm_name}'
model.save_model(model_name_location)
else:
with open(model_name_location, 'wb') as file:
pickle.dump(model, file)
output_mae_df = | pd.DataFrame(data=output_mae, columns=['Fold', 'Algorithm', 'Precision_mae', 'log_Precision_mae']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from web3 import Web3
import time
import json
import io
plt.rc('figure', titleweight='bold')
plt.rc('axes', grid='True', linewidth=1.2, titlepad=20)
plt.rc('font', weight='bold', size=16)
plt.rc('lines', linewidth=3.5)
eXRD_rewards = '0xDF191bFbdE2e3E178e3336E63C18DD20d537c421'
with open('./infura.json') as f:
INFURA_URL = json.load(f)['url']
w3 = Web3(Web3.HTTPProvider(INFURA_URL))
with open('./eXRD_rewards.json') as f:
ABI = json.load(f)['result']
rewardsContract = w3.eth.contract(address=eXRD_rewards, abi=ABI)
emissionTimestamps = np.array([1605629336, 1608221336, 1608221858, 1610813858])
class RewardTrender():
baseIndex = | pd.date_range(start='17-11-2020 17:00', periods=180*4, freq='6H') | pandas.date_range |
# Imports
import sqlalchemy
import pandas as pd
import numpy as np
# Create the SQLAlchemy engine for the database
db_connection = sqlalchemy.create_engine(
'postgresql+pg8000://postgres:123456@localhost:5433/folhadb',
client_encoding='utf8',
)
# 1. Extract
# Extract the cargos table into a pandas dataframe
cargos_df = pd.read_sql('SELECT * FROM folha.cargos', db_connection)
# Extract the carreiras table into a pandas dataframe
carreiras_df = pd.read_sql('SELECT * FROM folha.carreiras', db_connection)
# Extract the unidades table into a pandas dataframe
unidades_df = pd.read_sql('SELECT * FROM folha.unidades', db_connection)
# Extract the setores table into a pandas dataframe
setores_df = pd.read_sql('SELECT * FROM folha.setores', db_connection)
# Extract the evolucoes_funcionais table into a pandas dataframe
evolucoes_funcionais_df = pd.read_sql(
'SELECT * FROM folha.evolucoes_funcionais', db_connection
)
# Extract the colaboradores table into a pandas dataframe
colaboradores_df = pd.read_sql(
'SELECT * FROM folha.colaboradores', db_connection
)
# Extract the lancamentos table into a pandas dataframe
lancamentos_df = pd.read_sql('SELECT * FROM folha.lancamentos', db_connection)
# Extract the folhas_pagamentos table into a pandas dataframe
folhas_pagamentos_df = pd.read_sql(
'SELECT * FROM folha.folhas_pagamentos', db_connection
)
# Extract the rubricas table into a pandas dataframe
rubricas_df = pd.read_sql('SELECT * FROM folha.rubricas', db_connection)
# Extract the grupos_rubricas table into a pandas dataframe
grupos_rubricas_df = pd.read_sql(
'SELECT * FROM folha.grupos_rubricas', db_connection
)
# 2. Transform
## dm_cargos
# Merge cargos with carreiras
dm_cargos_df = pd.merge(
left=cargos_df, right=carreiras_df, how='left', on='cod_carreira'
)
# Drop the cod_carreira column
dm_cargos_df.drop(columns=['cod_carreira'], inplace=True)
## dm_setores
# Merge setores with unidades
dm_setores_df = pd.merge(
left=setores_df, right=unidades_df, how='left', on='cod_und'
)
# Rename columns
dm_setores_df.rename(
columns={
'dsc_und': 'dsc_unidade',
'cid_und': 'cidade_unidade',
'uf_und': 'uf_unidade',
},
inplace=True,
)
# Drop the cod_und and cod_colab_chefe columns
dm_setores_df.drop(columns=['cod_und', 'cod_colab_chefe'], inplace=True)
## dm_rubricas
# Merge rubricas with grupos_rubricas
dm_rubricas_df = pd.merge(
left=rubricas_df, right=grupos_rubricas_df, how='left', on='cod_grupo'
)
# Drop the cod_grupo column
dm_rubricas_df.drop(columns=['cod_grupo'], inplace=True)
## dm_faixas_etarias
# Create the dm_faixas_etarias dataframe
dm_faixas_etarias_df = pd.DataFrame(
{
'cod_faixa': [1, 2, 3, 4],
'dsc_faixa': [
'até 21 anos',
'de 21 a 30 anos',
'de 31 a 45 anos',
'acima de 45 anos',
],
'idade_inicial': [0, 22, 31, 45],
'idade_final': [21, 30, 45, 100],
}
)
## dm_tempos_servicos
# Create the dm_tempos_servicos dataframe
dm_tempos_servicos_df = pd.DataFrame(
{
'cod_tempo_serv': [1, 2, 3, 4, 5],
'dsc_tempo_serv': [
'até 1 ano',
'de 1 a 10 anos',
'de 11 a 20 anos',
'de 21 a 30 anos',
'acima de 31 anos',
],
'ano_inicial': [0, 1, 11, 21, 31],
'ano_final': [0, 10, 20, 30, 100],
}
)
## dm_tempos_folhas
# Create the dm_tempos_folhas dataframe from folhas_pagamentos, dropping the tpo_folha and dsc_folha columns
dm_tempos_folhas_df = folhas_pagamentos_df.drop(
columns=['tpo_folha', 'dsc_folha'], inplace=False
)
# Create the id_ano_mes column from year and month
dm_tempos_folhas_df['id_ano_mes'] = (
dm_tempos_folhas_df['ano'].astype(str)
+ dm_tempos_folhas_df['mes'].astype(str)
).astype(int)
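# Caveat: because the month is not zero-padded, the resulting integer does not always sort
# chronologically (e.g. 2021/10 -> 202110 but 2022/1 -> 20221, which is numerically smaller);
# padding with str(mes).zfill(2) would give a stable YYYYMM key. The same construction is
# repeated for ft_lancamentos below, so the dm/ft values still match each other.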
## ft_lancamentos
# Merge the lancamentos, folhas_pagamentos, colaboradores and evolucoes_funcionais tables
ft_lancamentos_df = pd.merge(
left=lancamentos_df,
right=folhas_pagamentos_df,
how='left',
on=['ano', 'mes', 'tpo_folha'],
)
ft_lancamentos_df = pd.merge(
left=ft_lancamentos_df, right=colaboradores_df, how='left', on='cod_colab'
)
ft_lancamentos_df = pd.merge(
left=ft_lancamentos_df,
right=evolucoes_funcionais_df,
how='right',
on='cod_colab',
)
# Create the id_ano_mes column from year and month
ft_lancamentos_df['id_ano_mes'] = (
ft_lancamentos_df['ano'].astype(str) + ft_lancamentos_df['mes'].astype(str)
).astype(int)
# cod_faixa column
# Create the idade_colab column from the employee's birth date and the posting (lancamento) date
ft_lancamentos_df['idade_colab'] = (
pd.to_datetime(ft_lancamentos_df['dat_lanc'])
- pd.to_datetime(ft_lancamentos_df['dat_nasc'])
) // np.timedelta64(1, 'Y')
# Function to derive cod_faixa from idade_colab
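# Note: as written, an age of exactly 21 falls into group 2, whereas dm_faixas_etarias_df
# defines group 1 with idade_final = 21; change the first comparison to `idade <= 21`
# if 21-year-olds are meant to stay in group 1.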
def get_cod_faixa(idade_colab):
list_cod_faixa = []
for idade in idade_colab:
if idade < 21:
list_cod_faixa.append(1)
elif idade <= 30:
list_cod_faixa.append(2)
elif idade <= 45:
list_cod_faixa.append(3)
else:
list_cod_faixa.append(4)
return list_cod_faixa
ft_lancamentos_df['cod_faixa'] = get_cod_faixa(
ft_lancamentos_df['idade_colab']
)
# cod_tempo_serv column
# Create the tempo_serv column from the employee's admission date and the posting (lancamento) date
ft_lancamentos_df['tempo_serv'] = (
pd.to_datetime(ft_lancamentos_df['dat_lanc'])
- | pd.to_datetime(ft_lancamentos_df['dat_admissao']) | pandas.to_datetime |
"""Command line interface."""
from argparse import (
Action,
ArgumentParser,
)
from datetime import datetime
from datetime import date
import pandas
from pandas import DataFrame
from penn_chime.constants import CHANGE_DATE
from penn_chime.model.parameters import Parameters, Disposition
from penn_chime.model.sir import Sir as Model
from clienv import ChimeCLIEnvironment
from sys import stdout
from logging import INFO, basicConfig, getLogger
import shlex
import unicodedata
import os
from pdfgenerator.chime_pdf_generator import generate_pdf
basicConfig(
level=INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
stream=stdout,
)
logger = getLogger(__name__)
VERBOSE = False
OPEN_PDF = True
class FromFile(Action):
"""From File."""
def __call__(self, parser, namespace, values, option_string=None):
with values as f:
parser.parse_args(f.read().split(), namespace)
def cast_date(string):
return datetime.strptime(string, '%Y-%m-%d').date()
def validator(arg, cast, min_value, max_value, required, default ):
"""Validator."""
def validate(string):
"""Validate."""
if string == '' and cast != str:
if required:
                raise AssertionError('%s is required.' % arg)
return None
value = cast(string)
if min_value is not None:
assert value >= min_value
if max_value is not None:
assert value <= max_value
return value
return validate
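# Example usage (sketch): the returned closure casts and range-checks a CLI string, e.g.
#   validate = validator('--n-days', int, 0, None, True, None)
#   validate('30')  # -> 30; an empty string raises AssertionError when the argument is required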
def parse_args(args):
"""Parse args."""
    parser = ArgumentParser(description=f"penn_chime: {CHANGE_DATE}")
parser.add_argument("--file", type=open, action=FromFile)
parser.add_argument(
"--location", type=str, default="no location"
)
parser.add_argument(
"--scenario-id", type=str, default="no id",
)
parser.add_argument("--hosp-capacity", type=int, help="MedSurg Capacity", default=0)
parser.add_argument("--icu-capacity", type=int, help="ICU Capacity", default=0)
parser.add_argument("--vent-capacity", type=int, help="Ventilators", default=0)
parser.add_argument("--hosp-occupied", type=int, help="Non-COVID19 MedSurg Occupancy", default=0)
parser.add_argument("--icu-occupied", type=int, help="Non-COVID19 ICU Occupancy", default=0)
parser.add_argument("--vent-occupied", type=int, help="Non-COVID19 Ventilators in Use", default=0)
parser.add_argument("--current-precautionary", type=int, help="Currently Hospitalized Precautionary COVID-19 Patients (>= 0)", default=0 ),
# generate_pdf: 0 = None, 1=PDF for each scenario, 2=One PDF for with all scenarios, 3=Both
parser.add_argument("--generate-pdf", type=int, help="Generate PDF Report", default=1)
parser.add_argument("--actuals-date", type=cast_date, help="Actuals Date", default=cast_date('1980-01-01') )
parser.add_argument("--mitigation-date", type=cast_date, help="Mitigation Start Date", default=None)
for arg, cast, min_value, max_value, help, required, default in (
("--current-hospitalized", int, 0, None, "Currently Hospitalized COVID-19 Patients (>= 0)", True, None ),
("--date-first-hospitalized", cast_date, None, None, "Date of First Hospitalization", False, None ),
("--doubling-time-low", float, 0.0, None, "Doubling time (lower) before social distancing (days)", True, None ),
("--doubling-time-observed", float, 0.0, None, "Doubling time (observed) before social distancing (days)", True, None ),
("--doubling-time-high", float, 0.0, None, "Doubling time (upper) before social distancing (days)", True, None ),
("--hospitalized-days", int, 0, None, "Average Hospital Length of Stay (days)", True, None ),
("--hospitalized-rate", float, 0.00001, 1.0, "Hospitalized Rate: 0.00001 - 1.0", True, None ),
("--icu-days", int, 0, None, "Average Days in ICU", True, None),
("--icu-rate", float, 0.0, 1.0, "ICU Rate: 0.0 - 1.0", True, None),
("--market-share", float, 0.00001, 1.0, "Hospital Market Share (0.00001 - 1.0)", True, None ),
("--infectious-days", float, 0.0, None, "Infectious days", True, None),
("--n-days", int, 0, None, "Number of days to project >= 0", True, 200 ),
("--relative-contact-rate", float, 0.0, 1.0, "Social Distancing Reduction Rate: 0.0 - 1.0", True, None ),
("--relative-contact-rate-0", float, 0.0, 1.0, "Social Distancing Reduction Rate (0): 0.0 - 1.0", True, None ),
("--relative-contact-rate-1", float, 0.0, 1.0, "Social Distancing Reduction Rate (1): 0.0 - 1.0", True, None ),
("--population", int, 1, None, "Regional Population >= 1", True, None),
("--ventilated-days", int, 0, None, "Average Days on Ventilator", True, None),
("--ventilated-rate", float, 0.0, 1.0, "Ventilated Rate: 0.0 - 1.0", True, None),
("--current-date", cast_date, None, None, "Current Date", True, date.today() ),
("--start-day", int, None, None, "Start day for model output", False, None),
("--data-key", str, None, None, "Key for linking for displays", False, None),
):
parser.add_argument(
arg,
type=validator(arg, cast, min_value, max_value, required, default),
help=help,
)
return parser.parse_args(shlex.split(args))
def main():
"""Main."""
data = DataFrame()
head = DataFrame()
computed_data = DataFrame()
cenv = ChimeCLIEnvironment()
f = open(cenv.parameters_file, "r")
for x in f:
x = "".join(ch for ch in x if unicodedata.category(ch)[0]!="C")
a = parse_args(x)
logger.info("Processing %s", a.location)
# funky date issue
# if a.current_date is None :
# a.current_date = date.today()
p = Parameters(
current_hospitalized=a.current_hospitalized,
doubling_time=a.doubling_time_low,
infectious_days=a.infectious_days,
market_share=a.market_share,
n_days=a.n_days,
relative_contact_rate=a.relative_contact_rate,
population=a.population,
hospitalized=Disposition(a.hospitalized_days, a.hospitalized_rate),
icu=Disposition(a.icu_days, a.icu_rate),
ventilated=Disposition(a.ventilated_days, a.ventilated_rate),
mitigation_date=a.mitigation_date,
current_date = a.current_date,
recovered=0, # this will need to be fixed when CHIME catches up
)
fndata = cenv.output_dir + "/chime-projection-" + a.scenario_id + ".csv"
fncdata = cenv.output_dir + "/chime-computed-data-" + a.scenario_id + ".csv"
fnhead = cenv.output_dir + "/chime-parameters-" + a.scenario_id + ".csv"
doubling_rates = [ [a.doubling_time_low, "dt-low"], [a.doubling_time_high, "dt-high"], [a.doubling_time_observed, "dt-observed"]] #, [ None, "dt-computed"]]
contact_rates = [ [a.relative_contact_rate, "sd-norm"], [a.relative_contact_rate_0, "sd-0"], [a.relative_contact_rate_1, "sd-1"] ]
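    # Build a 3x3 grid of scenarios: each (doubling time, social-distancing rate) pair gets
    # its own SIR model run, and the column suffixes below keep their outputs distinguishable.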
m = []
zi = 0
for d in ( doubling_rates ):
mr = []
for r in ( contact_rates ) :
p.relative_contact_rate = r[0]
# if d[0] is None :
# p.doubling_time = None
# p.date_first_hospitalized = a.date_first_hospitalized
# else :
p.doubling_time = d[0]
p.date_first_hospitalized = None
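            # Fall back to the observed doubling time when neither a doubling time nor a
            # first-hospitalization date is available (see the commented-out branch above).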
if p.doubling_time is None and p.date_first_hospitalized is None:
p.doubling_time = doubling_rates[2][0]
zi = zi + 1
ds = Model (p)
suffix = ' ' + d[1] + ' ' + r[1]
ds.dispositions_df.rename( columns = { 'ever_hospitalized':'disp-hosp' + suffix, 'ever_icu':'disp-icu' + suffix, 'ever_ventilated':'disp-vent' + suffix }, inplace = True)
ds.census_df.rename( columns = { 'census_hospitalized':'census-hosp' + suffix, 'census_icu':'census-icu' + suffix, 'census_ventilated':'census-vent' + suffix }, inplace = True)
ds.admits_df.rename( columns = { 'admits_hospitalized':'new-hosp' + suffix, 'admits_icu':'new-icu' + suffix, 'admits_ventilated':'new-vent' + suffix }, inplace = True)
ds.raw_df = ds.raw_df[[ "day", "susceptible", "infected", "recovered" ]]
            ds.raw_df.rename( columns = { 'susceptible':'susceptible' + suffix, 'infected':'infected' + suffix, 'recovered':'recovered' + suffix }, inplace = True)
mr.append( ds )
m.append(mr)
# # assemble and merge datasets for output
# # the second day column is to assist with a lookup function in Excel
rf = DataFrame( m[0][0].census_df[['day']] )
df = DataFrame( m[0][0].census_df[['day']] )
rf['Location'] = a.location
rf["MedSurg Capacity"] = a.hosp_capacity
rf["ICU Capacity"] = a.icu_capacity
rf["Ventilators"] = a.vent_capacity
rf["Non-COVID19 MedSurg Occupancy"] = a.hosp_occupied
rf["Non-COVID19 ICU Occupancy"] = a.icu_occupied
rf["Non-COVID19 Ventilators in Use"] = a.vent_occupied
if a.data_key is not None:
rf['data key'] = a.data_key
df['day-r'] = rf['day']
for mr in ( m ) :
for ds in ( mr ) :
rf = rf.merge(ds.census_df).merge(ds.admits_df).merge(ds.raw_df ).merge(ds.dispositions_df)
rf = rf.merge( df )
if a.start_day is not None :
rf = rf[rf['day'] >= a.start_day]
rf.to_csv(fndata, index=False)
if len(data.columns) == 0 :
data = rf.copy()
else :
data = pandas.concat([data, rf])
# Report out the parameters used to run the model for reference
param = {
'Scenario ID' : [ a.scenario_id ],
'Currently Hospitalized COVID-19 Patients (>= 0)' : [ a.current_hospitalized ],
'Currently Precautionary Hospitalized Patients (>= 0)' : [ a.current_precautionary ],
'Doubling Time (low)' : [ a.doubling_time_low ],
'Doubling Time (observed)' : [ a.doubling_time_observed ],
'Doubling Time (high)' : [ a.doubling_time_high ],
'Infectious days' : [ a.infectious_days ],
'Market Share' : [ a.market_share ],
'Social Distancing Reduction Rate: 0.0 - 1.0' : [ a.relative_contact_rate ],
'Social Distancing Reduction Rate (0): 0.0 - 1.0' : [ a.relative_contact_rate_0 ],
'Social Distancing Reduction Rate (1): 0.0 - 1.0' : [ a.relative_contact_rate_1 ],
'Population' : [ a.population ],
'Hospitalized Rate: 0.00001 - 1.0' : [ a.hospitalized_rate ],
'Average Hospital Length of Stay (days)' : [ a.hospitalized_days ],
'ICU Rate: 0.0 - 1.0' : [ a.icu_rate ],
'Average Days in ICU' : [ a.icu_days ],
'Ventilated Rate: 0.0 - 1.0' : [ a.ventilated_rate ],
'Average Days on Ventilator' : [ a.ventilated_days ],
'Date of First Hospitalization' : [ a.date_first_hospitalized ],
'Mitigation Start Date' : [ a.mitigation_date ],
'Location Code' : [ a.location ],
'Start day for model output' : [ a.start_day ],
'MedSurg Capacity' : [ a.hosp_capacity ],
'ICU Capacity' : [ a.icu_capacity ],
'Ventilator Capacity' : [ a.vent_capacity ],
# 'Non-COVID19 MedSurg Occupancy' : [ a.hosp_occupied ],
# 'Non-COVID19 ICU Occupancy' : [ a.icu_occupied ],
# 'Non-COVID19 Ventilators in Use' : [ a.vent_occupied ],
'Days to Project' : [ a.n_days ],
'Report Generated' : cenv.run_datetime.strftime("%m/%d/%Y %H:%M:%S"),
'Actuals as of' : [a.actuals_date.strftime("%m/%d/%Y")] }
if a.data_key is not None:
param.update({ 'Data Key': [ a.data_key ], })
finfo = DataFrame( param )
finfo.to_csv(fnhead, index=False)
if len(head.columns) == 0 :
head = finfo.copy()
else :
head = | pandas.concat([head, finfo]) | pandas.concat |
import numpy as np
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import StandardScaler
train_data_path = "train_data.csv"
labels_path = "train_labels.csv"
test_data_path = "test_data.csv"
def load_data_train_test_data():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))
pca = PCA(79)
scaler = StandardScaler()
training_data_set = scaler.fit_transform(training_data_set)
training_data_set = pca.fit_transform(training_data_set)
training_data_set = np.append(training_data_set, genres_labels, 1)
number_of_cols = training_data_set.shape[1]
train, test = train_test_split(training_data_set, test_size=0.25, random_state=12,
stratify=training_data_set[:, number_of_cols - 1])
train_x = train[:, :number_of_cols - 1]
train_y = train[:, number_of_cols - 1]
# sm = SMOTE()
# x_train_res, y_train_res = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
test_x = test[:, :number_of_cols - 1]
test_y = test[:, number_of_cols - 1]
return train_x, train_y, test_x, test_y, genres, scaler, pca
def load_train_data_rythym_only():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))[:, :168]
pca = PCA(100)
scaler = StandardScaler()
training_data_set = scaler.fit_transform(training_data_set)
# training_data_set = preprocessing.normalize(training_data_set, norm='l2')
training_data_set = pca.fit_transform(training_data_set)
training_data_set = np.append(training_data_set, genres_labels, 1)
number_of_cols = training_data_set.shape[1]
train, test = train_test_split(training_data_set, test_size=0.25, random_state=12,
stratify=training_data_set[:, number_of_cols - 1])
train_x = train[:, :number_of_cols - 1]
train_y = train[:, number_of_cols - 1]
sm = SMOTE()
# x_train_res, y_train_res = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
test_x = test[:, :number_of_cols - 1]
test_y = test[:, number_of_cols - 1]
return train_x, train_y, test_x, test_y, genres, scaler, pca
def load_train_data_chroma_only():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))[:, 169:216]
pca = PCA(40)
scaler = StandardScaler()
training_data_set = scaler.fit_transform(training_data_set)
training_data_set = pca.fit_transform(training_data_set)
training_data_set = np.append(training_data_set, genres_labels, 1)
number_of_cols = training_data_set.shape[1]
train, test = train_test_split(training_data_set, test_size=0.25, random_state=12,
stratify=training_data_set[:, number_of_cols - 1])
train_x = train[:, :number_of_cols - 1]
train_y = train[:, number_of_cols - 1]
# sm = SMOTE()
# x_train_res, y_train_res = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
test_x = test[:, :number_of_cols - 1]
test_y = test[:, number_of_cols - 1]
return train_x, train_y, test_x, test_y, genres, scaler, pca
def load_train_data_MFCC_only():
genres_labels = np.array( | pd.read_csv(labels_path, index_col=False, header=None) | pandas.read_csv |
import pandas as pd
import numpy as np
import tests.mocks.operations as mockops
from trumania.core import operations
from trumania.core.util_functions import build_ids
def test_apply_should_delegate_to_single_col_dataframe_function_correctly():
# some function that expect a dataframe as input => must return
# dataframe with "result" column
def f(df):
return pd.DataFrame({"result": df["A"] + df["D"] - df["C"]})
tested = operations.Apply(source_fields=["A", "C", "D"],
named_as="r",
f=f, f_args="dataframe")
story_data = pd.DataFrame(
np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = tested.build_output(story_data)
assert result["r"].equals(story_data["A"] + story_data["D"] - story_data[
"C"])
def test_apply_should_delegate_to_multi_col_dataframe_function_correctly():
# now f returns several columns
def f(df):
return pd.DataFrame({
"r1": df["A"] + df["D"] - df["C"],
"r2": df["A"] + df["C"],
"r3": df["A"] * df["C"],
})
tested = operations.Apply(source_fields=["A", "C", "D"],
named_as=["op1", "op2", "op3"],
f=f, f_args="dataframe")
story_data = pd.DataFrame(
np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = tested.transform(story_data)
assert result.columns.tolist() == ["A", "B", "C", "D", "E", "op1", "op2",
"op3"]
assert result["op1"].equals(
story_data["A"] + story_data["D"] - story_data["C"])
assert result["op2"].equals(
story_data["A"] + story_data["C"])
assert result["op3"].equals(
story_data["A"] * story_data["C"])
def test_apply_should_delegate_to_columns_function_correctly():
"""
same as the above, but this time f input and output arguments are
pandas Series
"""
def f(ca, cc, cd):
return ca + cd - cc
tested = operations.Apply(source_fields=["A", "C", "D"],
named_as="r",
f=f, f_args="series")
story_data = pd.DataFrame(
np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = tested.build_output(story_data)
assert result["r"].equals(
story_data["A"] + story_data["D"] - story_data["C"])
def test_one_execution_should_merge_empty_data_correctly():
# empty previous
prev_df = pd.DataFrame(columns=[])
prev_log = {}
nop = operations.Operation()
output, logs = operations.Chain._execute_operation((prev_df, prev_log), nop)
assert logs == {}
assert output.equals(prev_df)
def test_one_execution_should_merge_one_op_with_nothing_into_one_result():
# empty previous
prev = pd.DataFrame(columns=[]), {}
cdrs = pd.DataFrame(np.random.rand(12, 3), columns=["A", "B", "duration"])
input = pd.DataFrame(np.random.rand(10, 2), columns=["C", "D"])
op = mockops.FakeOp(input, logs={"cdrs": cdrs})
output, logs = operations.Chain._execute_operation(prev, op)
assert logs == {"cdrs": cdrs}
assert input.equals(output)
def test_one_execution_should_merge_2_ops_correctly():
# previous results
init = | pd.DataFrame(columns=[]) | pandas.DataFrame |
import requests
import pandas as pd
from dateutil.parser import parse
# Token obtained from the Facebook Graph API Explorer
token = '<KEY>'
# Fan-page ids and names obtained from the Facebook Graph API Explorer, packed into a dict
fanpage = {'137698833067234': '資料視覺化 / Data Visualization',
'1703467299932229': 'Data Man 的資料視覺化筆記'}
# Create an empty list
information_list = []
# Loop over the fan pages one by one, using format() to substitute the id and token into the {} placeholders
for ele in fanpage:
res = requests.get('https://graph.facebook.com/v2.9/{}/posts?limit=100&access_token={}'.format(ele, token))
    # The API returns at most 100 records per call, so use a while loop to page through and fetch everything
while 'paging' in res.json():
for information in res.json()['data']:
if 'message' in information:
information_list.append(
[fanpage[ele], information['message'], parse(information['created_time']).date()])
if 'next' in res.json()['paging']:
res = requests.get(res.json()['paging']['next'])
else:
break
# Finally, convert the list into a dataframe and export it as a CSV file
information_df = | pd.DataFrame(information_list, columns=['粉絲專頁', '發文內容', '發文時間']) | pandas.DataFrame |
""" test parquet compat """
import datetime
from distutils.version import LooseVersion
import os
from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET, reason="fastparquet is not installed"
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
check_like=False,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
check_names: list of str, optional
Closed set of column names to be compared
check_like: bool, optional
If True, ignore the order of index & columns.
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(
expected, actual, check_names=check_names, check_like=check_like
)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
def test_invalid_engine(df_compat):
with pytest.raises(ValueError):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_get_engine_auto_error_message():
# Expect different error messages from get_engine(engine="auto")
# if engines aren't installed vs. are installed but bad version
from pandas.compat._optional import VERSIONS
# Do we have engines installed, but a bad version of them?
pa_min_ver = VERSIONS.get("pyarrow")
fp_min_ver = VERSIONS.get("fastparquet")
have_pa_bad_version = (
False
if not _HAVE_PYARROW
else LooseVersion(pyarrow.__version__) < LooseVersion(pa_min_ver)
)
have_fp_bad_version = (
False
if not _HAVE_FASTPARQUET
else LooseVersion(fastparquet.__version__) < LooseVersion(fp_min_ver)
)
# Do we have usable engines installed?
have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version
if not have_usable_pa and not have_usable_fp:
# No usable engines found.
if have_pa_bad_version:
match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
if have_fp_bad_version:
match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
if (
LooseVersion(pyarrow.__version__) < "0.15"
and LooseVersion(pyarrow.__version__) >= "0.13"
):
pytest.xfail(
"Reading fastparquet with pyarrow in 0.14 fails: "
"https://issues.apache.org/jira/browse/ARROW-6492"
)
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
def test_error(self, engine):
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, engine, ValueError)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# unicode
df.columns = ["foo", "bar"]
check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, engine, ValueError)
# bytes
df.columns = [b"foo", b"bar"]
self.check_error_on_write(df, engine, ValueError)
# python object
df.columns = [
datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1),
]
self.check_error_on_write(df, engine, ValueError)
@pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
def test_compression(self, engine, compression):
if compression == "snappy":
pytest.importorskip("snappy")
elif compression == "brotli":
pytest.importorskip("brotli")
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={"compression": compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
expected = pd.DataFrame({"string": list("abc")})
check_round_trip(
df, engine, expected=expected, read_kwargs={"columns": ["string"]}
)
def test_write_index(self, engine):
check_names = engine != "fastparquet"
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine)
indexes = [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
]
# non-default index
for index in indexes:
df.index = index
if isinstance(index, pd.DatetimeIndex):
df.index = df.index._with_freq(None) # freq doesnt round-trip
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
check_round_trip(df, engine)
def test_write_multiindex(self, pa):
# Not supported in fastparquet as of 0.1.3 or older pyarrow version
engine = pa
df = pd.DataFrame({"A": [1, 2, 3]})
index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df.index = index
check_round_trip(df, engine)
def test_write_column_multiindex(self, engine):
# column multi-index
mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
self.check_error_on_write(df, engine, ValueError)
def test_multiindex_with_columns(self, pa):
engine = pa
dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list("ABC"))
index1 = pd.MultiIndex.from_product(
[["Level1", "Level2"], dates], names=["level", "date"]
)
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
check_round_trip(df, engine)
check_round_trip(
df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
)
def test_write_ignoring_index(self, engine):
# ENH 20768
# Ensure index=False omits the index from the written Parquet file.
df = | pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]}) | pandas.DataFrame |
import composeml as cp
import numpy as np
import pandas as pd
import pytest
from dask import dataframe as dd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import NaturalLanguage
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE
)
from featuretools.entityset import EntitySet, Timedelta
from featuretools.exceptions import UnusedPrimitiveWarning
from featuretools.primitives import (
GreaterThanScalar,
Max,
Mean,
Min,
Sum,
make_agg_primitive,
make_trans_primitive
)
from featuretools.synthesis import dfs
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library
@pytest.fixture
def datetime_es():
cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5],
"card_id": [1, 1, 5, 1, 5],
"transaction_time": pd.to_datetime([
'2011-2-28 04:00', '2012-2-28 05:00',
'2012-2-29 06:00', '2012-3-1 08:00',
'2014-4-1 10:00']),
"fraud": [True, False, False, False, True]})
datetime_es = EntitySet(id="fraud_data")
datetime_es = datetime_es.add_dataframe(
dataframe_name="transactions",
dataframe=transactions_df,
index="id",
time_index="transaction_time")
datetime_es = datetime_es.add_dataframe(
dataframe_name="cards",
dataframe=cards_df,
index="id")
datetime_es = datetime_es.add_relationship("cards", "id", "transactions", "card_id")
datetime_es.add_last_time_indexes()
return datetime_es
def test_passing_strings_to_logical_types_dfs():
teams = pd.DataFrame({
'id': range(3),
'name': ['Breakers', 'Spirit', 'Thorns']
})
games = pd.DataFrame({
'id': range(5),
'home_team_id': [2, 2, 1, 0, 1],
'away_team_id': [1, 0, 2, 1, 0],
'home_team_score': [3, 0, 1, 0, 4],
'away_team_score': [2, 1, 2, 0, 0]
})
dataframes = {'teams': (teams, 'id', None, {'name': 'natural_language'}), 'games': (games, 'id')}
relationships = [('teams', 'id', 'games', 'home_team_id')]
features = dfs(dataframes, relationships, target_dataframe_name="teams", features_only=True)
name_logical_type = features[0].dataframe['name'].ww.logical_type
assert isinstance(name_logical_type, NaturalLanguage)
def test_accepts_cutoff_time_df(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_warns_cutoff_time_dask(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
cutoff_times_df = dd.from_pandas(cutoff_times_df, npartitions=2)
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df)
def test_accepts_cutoff_time_compose(dataframes, relationships):
def fraud_occured(df):
return df['fraud'].any()
lm = cp.LabelMaker(
target_dataframe_name='card_id',
time_index='transaction_time',
labeling_function=fraud_occured,
window_size=1
)
transactions_df = to_pandas(dataframes['transactions'][0])
labels = lm.search(
transactions_df,
num_examples_per_instance=-1
)
labels['time'] = pd.to_numeric(labels['time'])
labels.rename({'card_id': 'id'}, axis=1, inplace=True)
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="cards",
cutoff_time=labels)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 6
assert len(feature_matrix.columns) == len(features) + 1
def test_accepts_single_cutoff_time(dataframes, relationships):
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=20)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 5
assert len(feature_matrix.columns) == len(features)
def test_accepts_no_cutoff_time(dataframes, relationships):
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
instance_ids=[1, 2, 3, 5, 6])
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 5
assert len(feature_matrix.columns) == len(features)
def test_ignores_instance_ids_if_cutoff_df(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
instance_ids = [1, 2, 3, 4, 5]
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
instance_ids=instance_ids)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_approximate_features(pd_dataframes, relationships):
# TODO: Update to use Dask dataframes when issue #985 is closed
cutoff_times_df = pd.DataFrame({"instance_id": [1, 3, 1, 5, 3, 6],
"time": [11, 16, 16, 26, 17, 22]})
# force column to BooleanNullable
pd_dataframes['transactions'] += ({'fraud': "BooleanNullable"},)
feature_matrix, features = dfs(dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
approximate=5,
cutoff_time_in_index=True)
direct_agg_feat_name = 'cards.PERCENT_TRUE(transactions.fraud)'
assert len(feature_matrix.index) == 6
assert len(feature_matrix.columns) == len(features)
truth_values = | pd.Series(data=[1.0, 0.5, 0.5, 1.0, 0.5, 1.0]) | pandas.Series |
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Defines the ForecastModel class, which encapsulates model functions used in
forecast model fitting, as well as their number of parameters and
initialisation parameters.
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python style guide
# http://sphinx.pocoo.org/rest.html - Use Restructured Text for
# docstrings
# -- Public Imports
import itertools
import logging
import numpy as np
import pandas as pd
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, \
MO, nearest_workday, next_monday, next_monday_or_tuesday, \
GoodFriday, EasterMonday, USFederalHolidayCalendar
from pandas.tseries.offsets import DateOffset
from datetime import datetime
# -- Private Imports
from anticipy import model_utils
# -- Globals
logger = logging.getLogger(__name__)
# Fourier model configuration
_dict_fourier_config = { # Default configuration for fourier-based models
'period': 365.25, # days in year
'harmonics': 10 # TODO: evaluate different harmonics values
}
_FOURIER_PERIOD = 365.25
_FOURIER_HARMONICS = 10 # TODO: evaluate different harmonics values
_FOURIER_K = (2.0 * np.pi / _FOURIER_PERIOD)
_FOURIER_I = np.arange(1, _FOURIER_HARMONICS + 1)
_FOURIER_DATE_ORIGIN = datetime(1970, 1, 1)
# -- Functions
# ---- Utility functions
def logger_info(msg, data):
# Convenience function for easier log typing
logger.info(msg + '\n%s', data)
def _get_f_init_params_default(n_params):
# Generate a default function for initialising model parameters: use
# random values between 0 and 1
return lambda a_x=None, a_y=None, a_date=None, is_mult=False:\
np.random.uniform(low=0.001, high=1, size=n_params)
def _get_f_bounds_default(n_params):
# Generate a default function for model parameter boundaries. Default
# boundaries are (-inf, inf)
return lambda a_x=None, a_y=None, a_date=None: (
n_params * [-np.inf], n_params * [np.inf])
def _get_f_add_2_f_models(forecast_model1, forecast_model2):
# Add model functions of 2 ForecastModels
def f_add_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=False,
**kwargs) +
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=False,
**kwargs))
return f_add_2_f_models
def _get_f_mult_2_f_models(forecast_model1, forecast_model2):
# Multiply model functions of 2 ForecastModels
def f_mult_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=True,
**kwargs) *
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=True,
**kwargs))
return f_mult_2_f_models
def _get_f_add_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# addition
def f_add_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=False),
f_init_params2(a_x, a_y, a_date, is_mult=False)])
return f_add_2_f_init_params
def _get_f_mult_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# multiplication
def f_mult_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=True),
f_init_params2(a_x, a_y, a_date, is_mult=True)])
return f_mult_2_f_init_params
def _get_f_concat_2_bounds(forecast_model1, forecast_model2):
# Compose parameter boundary functions of 2 ForecastModels
def f_add_2_f_bounds(a_x, a_y, a_date=None):
return np.concatenate(
(forecast_model1.f_bounds(
a_x, a_y, a_date), forecast_model2.f_bounds(
a_x, a_y, a_date)), axis=1)
return f_add_2_f_bounds
def _f_validate_input_default(a_x, a_y, a_date):
# Default input validation function for a ForecastModel. Always returns
# True
return True
def _as_list(l):
return l if isinstance(l, (list,)) else [l]
# Functions used to initialize cache variables in a ForecastModel
def _f_init_cache_a_month(a_x, a_date):
return a_date.month - 1
def _f_init_cache_a_weekday(a_x, a_date):
return a_date.weekday
def _f_init_cache_a_t_fourier(a_x, a_date):
# convert to days since epoch
t = (a_date - _FOURIER_DATE_ORIGIN).days.values
i = np.arange(1, _FOURIER_HARMONICS + 1)
a_tmp = _FOURIER_K * i.reshape(i.size, 1) * t
y = np.concatenate([np.sin(a_tmp), np.cos(a_tmp)])
return y
# Dictionary to store functions used to initialize cache variables
# in a ForecastModel
# This is shared across all ForecastModel instances
_dict_f_cache = dict(
a_month=_f_init_cache_a_month,
a_weekday=_f_init_cache_a_weekday,
a_t_fourier=_f_init_cache_a_t_fourier
)
# -- Classes
class ForecastModel:
"""
Class that encapsulates model functions for use in forecasting, as well as
their number of parameters and functions for parameter initialisation.
A ForecastModel instance is initialized with a model name, a number of
model parameters, and a model function. Class instances are
callable - when called as a function, their internal model function is
used. The main purpose of ForecastModel objects is to generate predicted
values for a time series, given a set of parameters. These values can be
compared to the original series to get an array of residuals::
y_predicted = model(a_x, a_date, params)
residuals = (a_y - y_predicted)
This is used in an optimization loop to obtain the optimal parameters for
the model.
The reason for using this class instead of raw model functions is that
ForecastModel supports function composition::
model_sum = fcast_model1 + fcast_model2
# fcast_model 1 and 2 are ForecastModel instances, and so is model_sum
a_y1 = fcast_model1(
a_x, a_date, params1) + fcast_model2(a_x, a_date, params2)
params = np.concatenate([params1, params2])
a_y2 = model_sum(a_x, a_date, params)
a_y1 == a_y2 # True
Forecast models can be added or multiplied, with the + and * operators.
Multiple levels of composition are supported::
model = (model1 + model2) * model3
Model composition is used to aggregate trend and seasonality model
components, among other uses.
Model functions have the following signature:
- f(a_x, a_date, params, is_mult)
- a_x : array of floats
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- params: array of floats - model parameters - the optimisation loop
updates this to fit our actual values. Each
model function uses a fixed number of parameters.
- is_mult: boolean. True if the model is being used with multiplicative
composition. Required because
some model functions (e.g. steps) have different behaviour
when added to other models than when multiplying them.
- returns an array of floats - with same length as a_x - output of the
model defined by this object's modelling function f_model and the
current set of parameters
By default, model parameters are initialized as random values between
0 and 1. It is possible to define a parameter initialization function
that picks initial values based on the original time series.
This is passed during ForecastModel creation with the argument
f_init_params. Parameter initialization is compatible with model
composition: the initialization function of each component will be used
for that component's parameters.
Parameter initialisation functions have the following signature:
- f_init_params(a_x, a_y, is_mult)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- returns an array of floats - with length equal to this object's n_params
value
By default, model parameters have no boundaries. However, it is possible
to define a boundary function for a model, that sets boundaries for each
model parameter, based on the input time series. This is passed during
ForecastModel creation with the argument f_bounds.
Boundary definition is compatible with model composition:
the boundary function of each component will be used for that component's
parameters.
Boundary functions have the following signature:
- f_bounds(a_x, a_y, a_date)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- returns a tuple of 2 arrays of floats. The first defines minimum
parameter boundaries, and the second the maximum parameter boundaries.
As an option, we can assign a list of input validation functions to a
model. These functions analyse the inputs that will be used for fitting a
model, returning True if valid, and False otherwise. The forecast logic
will skip a model from fitting if any of the validation functions for that
model returns False.
Input validation functions have the following signature:
- f_validate_input(a_x, a_y, a_date)
- See the description of model functions above for more details on these
parameters.
Our input time series should meet the following constraints:
- Minimum required samples depends on number of model parameters
- May include null values
- May include multiple values per sample
- A date array is only required if the model is date-aware
Class Usage::
model_x = ForecastModel(name, n_params, f_model, f_init_params,
l_f_validate_input)
# Get model name
model_name = model_x.name
# Get number of model parameters
n_params = model_x.n_params
# Get parameter initialisation function
f_init_params = model_x.f_init_params
# Get initial parameters
init_params = f_init_params(t_values, y_values)
# Get model fitting function
f_model = model_x.f_model
# Get model output
y = f_model(a_x, a_date, parameters)
The following pre-generated models are available. They are available as attributes from this module: # noqa
.. csv-table:: Forecast models
:header: "name", "params", "formula","notes"
:widths: 20, 10, 20, 40
"model_null",0, "y=0", "Does nothing.
Used to disable components (e.g. seasonality)"
"model_constant",1, "y=A", "Constant model"
"model_linear",2, "y=Ax + B", "Linear model"
"model_linear_nondec",2, "y=Ax + B", "Non decreasing linear model.
With boundaries to ensure model slope >=0"
"model_quasilinear",3, "y=A*(x^B) + C", "Quasilinear model"
"model_exp",2, "y=A * B^x", "Exponential model"
"model_decay",4, "Y = A * e^(B*(x-C)) + D", "Exponential decay model"
"model_step",2, "y=0 if x<A, y=B if x>=A", "Step model"
"model_two_steps",4, "see model_step", "2 step models.
Parameter initialization is aware of # of steps."
"model_sigmoid_step",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))
", "Sigmoid step model"
"model_sigmoid",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))", "
Sigmoid model"
"model_season_wday",7, "see desc.", "Weekday seasonality model.
Assigns a constant value to each weekday"
"model_season_wday",6, "see desc.", "6-param weekday seasonality model.
As above, with one constant set to 0."
"model_season_wday_2",2, "see desc.", "Weekend seasonality model.
Assigns a constant to each of weekday/weekend"
"model_season_month",12, "see desc.", "Month seasonality model.
Assigns a constant value to each month"
"model_season_fourier_yearly",10, "see desc", "Fourier
yearly seasonality model"
"""
def __init__(
self,
name,
n_params,
f_model,
f_init_params=None,
f_bounds=None,
l_f_validate_input=None,
l_cache_vars=None,
dict_f_cache=None,
):
"""
Create ForecastModel
:param name: Model name
:type name: basestring
:param n_params: Number of parameters for model function
:type n_params: int
:param f_model: Model function
:type f_model: function
:param f_init_params: Parameter initialisation function
:type f_init_params: function
:param f_bounds: Boundary function
:type f_bounds: function
"""
self.name = name
self.n_params = n_params
self.f_model = f_model
if f_init_params is not None:
self.f_init_params = f_init_params
else:
# Default initial parameters: random values between 0 and 1
self.f_init_params = _get_f_init_params_default(n_params)
if f_bounds is not None:
self.f_bounds = f_bounds
else:
self.f_bounds = _get_f_bounds_default(n_params)
if l_f_validate_input is None:
self.l_f_validate_input = [_f_validate_input_default]
else:
self.l_f_validate_input = _as_list(l_f_validate_input)
if l_cache_vars is None:
self.l_cache_vars = []
else:
self.l_cache_vars = _as_list(l_cache_vars)
if dict_f_cache is None:
self.dict_f_cache = dict()
else:
self.dict_f_cache = dict_f_cache
# TODO - REMOVE THIS - ASSUME NORMALIZED INPUT
def _get_f_init_params_validated(f_init_params):
# Adds argument validation to a parameter initialisation function
def f_init_params_validated(
a_x=None, a_y=None, a_date=None, is_mult=False):
if a_x is not None and pd.isnull(a_x).any():
raise ValueError('a_x cannot have null values')
return f_init_params(a_x, a_y, a_date, is_mult)
return f_init_params_validated
# Add logic to f_init_params that validates input
self.f_init_params = _get_f_init_params_validated(self.f_init_params)
def __call__(self, a_x, a_date, params, is_mult=False, **kwargs):
# assert len(params)==self.n_params
return self.f_model(a_x, a_date, params, is_mult, **kwargs)
def __str__(self):
return self.name
def __repr__(self):
return 'ForecastModel:{}'.format(self.name)
def __add__(self, forecast_model):
# Check for nulls
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}+{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_add_2_f_models(self, forecast_model)
f_init_params = _get_f_add_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, forecast_model):
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}*{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_mult_2_f_models(self, forecast_model)
f_init_params = _get_f_mult_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __rmul__(self, other):
return self.__mul__(other)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.name == other.name
return NotImplemented
def __ne__(self, other):
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
return self.name < other.name
def validate_input(self, a_x, a_y, a_date):
try:
l_result = [f_validate_input(a_x, a_y, a_date)
for f_validate_input in self.l_f_validate_input]
except AssertionError:
return False
return True
def init_cache(self, a_x, a_date):
dict_cache_vars = dict()
for k in self.l_cache_vars:
f = _dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
# Search vars defined in internal cache function dictionary
for k in self.dict_f_cache:
f = self.dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
return dict_cache_vars
# - Null model: 0
def _f_model_null(a_x, a_date, params, is_mult=False, **kwargs):
# This model does nothing - used to disable model components
# (e.g. seasonality) when adding/multiplying multiple functions
return float(is_mult) # Returns 1 if multiplying, 0 if adding
model_null = ForecastModel('null', 0, _f_model_null)
# - Constant model: :math:`Y = A`
def _f_model_constant(a_x, a_date, params, is_mult=False, **kwargs):
[A] = params
y = np.full(len(a_x), A)
return y
def _f_init_params_constant(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
return np.nanmean(a_y) + np.random.uniform(0, 1, 1)
model_constant = ForecastModel(
'constant',
1,
_f_model_constant,
_f_init_params_constant)
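# Example (sketch, not part of the library API): calling a ForecastModel
# directly. model_constant broadcasts its single parameter A over a_x;
# the values below are arbitrary placeholders.
#
# a_x = np.arange(5, dtype=float)
# a_date = pd.date_range('2020-01-01', periods=5, freq='D')
# model_constant(a_x, a_date, params=np.array([3.0]))
# # -> array([3., 3., 3., 3., 3.])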
# - Naive model: Y = Y(x-1)
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _f_model_naive(a_x, a_date, params, is_mult=False, df_actuals=None):
if df_actuals is None:
raise ValueError('model_naive requires a df_actuals argument')
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out = (
# This is not really intended to work with multiple values per sample
df_actuals.drop_duplicates('x')
.merge(df_out_tmp, how='outer')
.sort_values('x')
)
df_out['y'] = (
df_out.y.shift(1)
.fillna(method='ffill')
.fillna(method='bfill')
)
df_out = df_out.loc[df_out.x.isin(a_x)]
# df_out = df_out_tmp.merge(df_out, how='left')
# TODO: CHECK THAT X,DATE order is preserved
# TODO: df_out = df_out.merge(df_out_tmp, how='right')
return df_out.y.values
model_naive = ForecastModel('naive', 0, _f_model_naive)
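# Example (sketch): model_naive is not fitted - the actuals table is passed
# through **kwargs as df_actuals. The toy frame below is a placeholder; it
# needs 'date', 'x' and 'y' columns.
#
# df_actuals = pd.DataFrame({'date': pd.date_range('2020-01-01', periods=5, freq='D'),
#                            'x': np.arange(5), 'y': [1., 2., 3., 4., 5.]})
# model_naive(df_actuals.x.values, df_actuals.date, params=[],
#             df_actuals=df_actuals)
# # -> array([1., 1., 2., 3., 4.])  (each sample repeats the previous value)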
# - Seasonal naive model
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _fillna_wday(df):
"""
In a time series, shift samples by 1 week
and fill gaps with data from same weekday
"""
def add_col_y_out(df):
df = df.assign(y_out=df.y.shift(1).fillna(method='ffill'))
return df
df_out = (
df
.assign(wday=df.date.dt.weekday)
.groupby('wday', as_index=False).apply(add_col_y_out)
.sort_values(['x'])
.reset_index(drop=True)
)
return df_out
def _f_model_snaive_wday(a_x, a_date, params, is_mult=False, df_actuals=None):
"""Naive model - takes last valid weekly sample"""
if df_actuals is None:
raise ValueError('model_snaive_wday requires a df_actuals argument')
# df_actuals_model - table with actuals samples,
# adding y_out column with naive model values
df_actuals_model = _fillna_wday(df_actuals.drop_duplicates('x'))
# df_last_week - table with naive model values from last actuals week,
# to use in extrapolation
df_last_week = (
df_actuals_model
# Fill null actual values with data from previous weeks
.assign(y=df_actuals_model.y.fillna(df_actuals_model.y_out))
.drop_duplicates('wday', keep='last')
[['wday', 'y']]
.rename(columns=dict(y='y_out'))
)
# Generate table with extrapolated samples
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out_tmp['wday'] = df_out_tmp.date.dt.weekday
df_out_extrapolated = (
df_out_tmp
.loc[~df_out_tmp.date.isin(df_actuals_model.date)]
.merge(df_last_week, how='left')
.sort_values('x')
)
# Filter actuals table - only samples in a_x, a_date
df_out_actuals_filtered = (
# df_actuals_model.loc[df_actuals_model.x.isin(a_x)]
# Using merge rather than simple filtering to account for
# dates with multiple samples
df_actuals_model.merge(df_out_tmp, how='inner')
.sort_values('x')
)
df_out = (
pd.concat(
[df_out_actuals_filtered, df_out_extrapolated],
sort=False, ignore_index=True)
)
return df_out.y_out.values
model_snaive_wday = ForecastModel('snaive_wday', 0, _f_model_snaive_wday)
# - Spike model: :math:`Y = A`, when x_min <= X < x_max
def _f_model_spike(a_x, a_date, params, is_mult=False, **kwargs):
[A, x_min, x_max] = params
if is_mult:
c = 1
else:
c = 0
y = np.concatenate((
np.full(int(x_min), c),
np.full(int(x_max - x_min), A),
np.full(len(a_x) - int(x_max), c)
))
return y
def _f_init_params_spike(a_x=None, a_y=None, a_date=None, is_mult=False):
""" params are spike height, x start, x end """
# if not a_y.any():
if a_y is None:
        # Random init: arbitrary unit spike starting at a random point, one sample wide
        return np.concatenate([[1], np.random.uniform(0, 1, 1), [2]])
else:
diffs = np.diff(a_y)
# if diffs:
if True:
diff = max(diffs)
x_start = np.argmax(diffs)
x_end = x_start + 1
return np.array([diff, x_start, x_end])
model_spike = ForecastModel('spike', 3, _f_model_spike, _f_init_params_spike)
# - Spike model for dates - dates are fixed for each model
def _f_model_spike_date(
a_x,
a_date,
params,
date_start,
date_end,
is_mult=False):
[A] = params
mask_spike = (a_date >= date_start) * (a_date < date_end)
if is_mult:
y = mask_spike * A + ~mask_spike
else:
y = mask_spike * A
return y
def _f_init_params_spike(a_x=None, a_y=None, a_date=None, is_mult=False):
""" params are spike height, x start, x end """
if a_y is None:
return np.concatenate([np.array([1]) + np.random.uniform(0, 1, 1)])
else:
diffs = np.diff(a_y)
# if diffs:
if True:
diff = max(diffs)
return np.array([diff])
# else:
# rand = np.random.randint(1, len(a_y) - 1)
# return [1]
def get_model_spike_date(date_start, date_end):
f_model = (
lambda a_x, a_date, params, is_mult=False, **kwargs:
_f_model_spike_date(a_x, a_date, params, date_start, date_end, is_mult)
)
model_spike_date = ForecastModel(
'spike_date[{},{}]'.format(
pd.to_datetime(date_start).date(),
pd.to_datetime(date_end).date()),
1,
f_model,
_f_init_params_spike)
return model_spike_date
# - Linear model: :math:`Y = A*x + B`
def _f_model_linear(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
y = A * a_x + B
return y
def _f_init_params_linear(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(low=0, high=1, size=2)
else: # TODO: Improve this
if a_x is not None:
a_x_size = np.unique(a_x).size - 1
else:
a_x_size = a_y.size - 1
A = (a_y[-1] - a_y[0]) / a_x_size
B = a_y[0]
# Uniform low= 0*m, high = 1*m
return np.array([A, B])
model_linear = ForecastModel(
'linear',
2,
_f_model_linear,
_f_init_params_linear)
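# Example (sketch): the data-aware initialiser estimates slope and intercept
# from the first and last observations. Inputs below are placeholders.
#
# a_x = np.arange(10, dtype=float)
# a_y = 2.0 * a_x + 1.0
# _f_init_params_linear(a_x, a_y)              # -> array([2., 1.])
# model_linear(a_x, None, np.array([2., 1.]))  # -> 2*a_x + 1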
def f_init_params_linear_nondec(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
params = _f_init_params_linear(a_x, a_y, a_date)
if params[0] < 0:
params[0] = 0
return params
def f_bounds_linear_nondec(a_x=None, a_y=None, a_date=None):
# first param should be between 0 and inf
return [0, -np.inf], [np.inf, np.inf]
model_linear_nondec = ForecastModel('linear_nondec', 2, _f_model_linear,
f_init_params=f_init_params_linear_nondec,
f_bounds=f_bounds_linear_nondec)
# - QuasiLinear model: :math:`Y = A t^{B} + C`
def _f_model_quasilinear(a_x, a_date, params, is_mult=False, **kwargs):
(A, B, C) = params
y = A * np.power(a_x, B) + C
return y
model_quasilinear = ForecastModel('quasilinear', 3, _f_model_quasilinear)
# - Exponential model: math:: Y = A * B^t
# TODO: Deprecate - not safe to use
def _f_model_exp(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
y = A * np.power(B, a_x)
return y
model_exp = ForecastModel('exponential', 2, _f_model_exp)
# - Exponential decay model: math:: Y = A * e^(B*x) + D
def _f_model_decay(a_x, a_date, params, is_mult=False, **kwargs):
(A, B, D) = params
y = A * np.exp(B * (a_x)) + D
return y
def _f_validate_input_decay(a_x, a_y, a_date):
assert (a_y > 0).all()
def f_init_params_decay(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.array([0, 0, 0])
A = a_y[0] - a_y[-1]
B = np.log(np.min(a_y) / np.max(a_y)) / (len(a_y) - 1)
if B > 0 or B == -np.inf:
B = -0.5
C = a_y[-1]
return np.array([A, B, C])
def f_bounds_decay(a_x=None, a_y=None, a_date=None):
return [-np.inf, -np.inf, -np.inf], [np.inf, 0, np.inf]
model_decay = ForecastModel('decay', 3, _f_model_decay,
f_init_params=f_init_params_decay,
f_bounds=f_bounds_decay,
l_f_validate_input=_f_validate_input_decay)
# - Step function: :math:`Y = {0, if x < A | B, if x >= A}`
# A is the time of step, and B is the step
def _f_step(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
if is_mult:
y = 1 + (B - 1) * np.heaviside(a_x - A, 1)
else:
y = B * np.heaviside(a_x - A, 1)
return y
# TODO: Implement initialisation for multiplicative composition
def _f_init_params_step(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 2)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(1, 'diff').index[0]
b = df['diff'].iloc[a]
return np.array([a, b * 2])
# TODO: Add boundaries for X axis
model_step = ForecastModel('step', 2, _f_step, _f_init_params_step)
# - Spike model for dates - dates are fixed for each model
def _f_model_step_date(a_x, a_date, params, date_start, is_mult=False):
[A] = params
mask_step = (a_date >= date_start).astype(float)
if is_mult:
# y = mask_step*A + ~mask_step
y = mask_step * (A - 1) + 1
else:
y = mask_step * A
return y
# TODO: Implement initialisation for multiplicative composition
def _f_init_params_step_date(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(1, 'diff').index[0]
b = df['diff'].iloc[a]
return np.array([b * 2])
def get_model_step_date(date_start):
date_start = pd.to_datetime(date_start)
f_model = (
lambda a_x, a_date, params, is_mult=False, **kwargs:
_f_model_step_date(a_x, a_date, params, date_start, is_mult)
)
model_step_date = ForecastModel('step_date[{}]'.format(date_start.date()),
1, f_model, _f_init_params_step_date)
return model_step_date
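# Example (sketch): a date-fixed step model only fits the step height; the
# date below is an arbitrary placeholder.
#
# step_2020 = get_model_step_date('2020-03-01')
# step_2020.name       # -> 'step_date[2020-03-01]'
# step_2020.n_params   # -> 1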
# Two step functions
def _f_n_steps(n, a_x, a_date, params, is_mult=False):
if is_mult:
y = 1
else:
y = 0
for i in range(0, n + 1, 2):
A, B = params[i: i + 2]
if is_mult:
y = y * _f_step(a_x, a_date, (A, B), is_mult)
else:
y = y + _f_step(a_x, a_date, (A, B), is_mult)
return y
def _f_two_steps(a_x, a_date, params, is_mult=False, **kwargs):
return _f_n_steps(
n=2,
a_x=a_x,
a_date=a_date,
params=params,
is_mult=is_mult)
def _f_init_params_n_steps(
n=2,
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, n * 2)
else:
# max difference between consecutive values
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(n, 'diff').index[0:n].values
b = df['diff'].iloc[a].values
params = []
for i in range(0, n):
params += [a[i], b[i]]
return np.array(params)
def _f_init_params_two_steps(a_x=None, a_y=None, a_date=None, is_mult=False):
return _f_init_params_n_steps(
n=2,
a_x=a_x,
a_y=a_y,
a_date=a_date,
is_mult=is_mult)
model_two_steps = ForecastModel(
'two_steps',
2 * 2,
_f_two_steps,
_f_init_params_two_steps)
# - Sigmoid step function: `Y = {A + (B - A) / (1 + np.exp(- D * (a_x - C)))}`
# Spans from A to B, C is the position of the step in x axis
# and D is how steep the increase is
def _f_sigmoid(a_x, a_date, params, is_mult=False, **kwargs):
(B, C, D) = params
if is_mult:
A = 1
else:
A = 0
# TODO check if a_x is negative
y = A + (B - A) / (1 + np.exp(- D * (a_x - C)))
return y
def _f_init_params_sigmoid_step(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 3)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'y': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
c = df.nlargest(1, 'diff').index[0]
b = df.loc[c, 'y']
d = b * b
return b, c, d
def _f_init_bounds_sigmoid_step(a_x=None, a_y=None, a_date=None):
if a_y is None:
return [-np.inf, -np.inf, 0.], 3 * [np.inf]
if a_y.ndim > 1:
a_y = a_y[:, 0]
if a_x.ndim > 1:
a_x = a_x[:, 0]
diff = max(a_y) - min(a_y)
b_min = -2 * diff
b_max = 2 * diff
c_min = min(a_x)
c_max = max(a_x)
d_min = 0.
d_max = np.inf
return [b_min, c_min, d_min], [b_max, c_max, d_max]
# In this model, parameter initialization is aware of number of steps
model_sigmoid_step = ForecastModel(
'sigmoid_step',
3,
_f_sigmoid,
_f_init_params_sigmoid_step,
f_bounds=_f_init_bounds_sigmoid_step)
model_sigmoid = ForecastModel('sigmoid', 3, _f_sigmoid)
# Ramp functions - used for piecewise linear models
# example : model_linear_pw2 = model_linear + model_ramp
# example 2: model_linear_p23 = model_linear + model_ramp + model_ramp
# - Ramp function: :math:`Y = {0, if x < A | B, if x >= A}`
# A is the time of step, and B is the step
def _f_ramp(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
if is_mult:
y = 1 + (a_x - A) * (B) * np.heaviside(a_x - A, 1)
else:
y = (a_x - A) * B * np.heaviside(a_x - A, 1)
return y
def _f_init_params_ramp(a_x=None, a_y=None, a_date=None, is_mult=False):
# TODO: set boundaries: a_x (0.2, 0.8)
if a_y is None:
if a_x is not None:
nfirst_last = int(np.ceil(0.15 * a_x.size))
a = np.random.uniform(a_x[nfirst_last], a_x[-nfirst_last - 1], 1)
else:
a = np.random.uniform(0, 1, 1)
b = np.random.uniform(0, 1, 1)
return np.concatenate([a,
b])
else:
# TODO: FILTER A_Y BY 20-80 PERCENTILE IN A_X
df = pd.DataFrame({'b': a_y})
if a_x is not None:
#
df['x'] = a_x
# Required because we support input with multiple samples per x
# value
df = df.drop_duplicates('x')
df = df.set_index('x')
# max difference between consecutive values -- this assumes no null
# values in series
df['diff2'] = df.diff().diff().abs()
# We ignore the last 15% of the time series
skip_samples = int(np.ceil(df.index.size * 0.15))
a = (df.head(-skip_samples).tail(
-skip_samples).nlargest(1, 'diff2').index[0]
)
b = df['diff2'].loc[a]
# TODO: replace b with estimation of slope in segment 2
# minus slope in segment 1 - see init_params_linear
return np.array([a, b])
def _f_init_bounds_ramp(a_x=None, a_y=None, a_date=None):
if a_x is None:
a_min = -np.inf
a_max = np.inf
else:
# a_min = np.min(a_x)
nfirst_last = int(np.ceil(0.15 * a_x.size))
a_min = a_x[nfirst_last]
a_max = a_x[-nfirst_last]
# a_min = np.percentile(a_x, 15)
# a_max = np.percentile(a_x,85)
if a_y is None:
b_min = -np.inf
b_max = np.inf
else:
# TODO: FILTER A_Y BY 20-80 PERCENTILE IN A_X
# df = pd.DataFrame({'b': a_y})
# #max_diff2 = np.max(df.diff().diff().abs())
# max_diff2 = np.max(np.abs(np.diff(np.diff(a_y))))
#
# b_min = -2*max_diff2
# b_max = 2*max_diff2
b_min = -np.inf
b_max = np.inf
# logger_info('DEBUG: BOUNDS:',(a_min, b_min,a_max, b_max))
return ([a_min, b_min], [a_max, b_max])
model_ramp = ForecastModel(
'ramp',
2,
_f_ramp,
_f_init_params_ramp,
_f_init_bounds_ramp)
# - Weekday seasonality
def _f_model_season_wday(
a_x, a_date, params, is_mult=False,
# cache variables
a_weekday=None,
**kwargs):
# Weekday seasonality model, 6 params
# params_long[0] is default series value,
params_long = np.concatenate([[float(is_mult)], params])
if a_weekday is None:
a_weekday = _f_init_cache_a_weekday(a_x, a_date)
return params_long[a_weekday]
def _f_validate_input_season_wday(a_x, a_y, a_date):
assert a_date is not None
assert a_date.weekday.drop_duplicates().size == 7
model_season_wday = ForecastModel(
'season_wday',
6,
_f_model_season_wday,
l_f_validate_input=_f_validate_input_season_wday,
l_cache_vars=['a_weekday']
)
# - Month seasonality
def _f_init_params_season_month(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None or a_date is None:
return np.random.uniform(low=-1, high=1, size=11)
else: # TODO: Improve this
l_params_long = [np.mean(a_y[a_date.month == i])
for i in np.arange(1, 13)]
l_baseline = l_params_long[-1]
l_params = l_params_long[:-1]
if not is_mult:
l_params_add = l_params - l_baseline
return l_params_add
else:
l_params_mult = l_params / l_baseline
return l_params_mult
def _f_model_season_month(
a_x, a_date, params, is_mult=False,
# cache variables
a_month=None,
**kwargs):
# Month of December is taken as default level, has no parameter
# params_long[0] is default series value
params_long = np.concatenate([[float(is_mult)], params])
if a_month is None:
a_month = _f_init_cache_a_month(a_x, a_date)
return params_long[a_month]
model_season_month = ForecastModel(
'season_month',
11,
_f_model_season_month,
_f_init_params_season_month,
l_cache_vars=['a_month']
)
model_season_month_old = ForecastModel(
'season_month_old', 11, _f_model_season_month)
def _f_model_yearly_season_fourier(
a_x,
a_date,
params,
is_mult=False,
# cache params
a_t_fourier=None,
**kwargs):
if a_t_fourier is None:
a_t_fourier = _f_init_cache_a_t_fourier(None, a_date)
y = np.matmul(params, a_t_fourier)
return y
def _f_init_params_fourier_n_params(
n_params,
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
params = np.random.uniform(0.001, 1, n_params)
else:
# max difference in time series
diff = a_y.max() - a_y.min()
params = diff * np.random.uniform(0.001, 1, n_params)
return params
def _f_init_params_fourier(a_x=None, a_y=None, a_date=None, is_mult=False):
n_params = 2 * _dict_fourier_config['harmonics']
return _f_init_params_fourier_n_params(
n_params, a_x=a_x, a_y=a_y, a_date=a_date, is_mult=is_mult)
def _f_init_bounds_fourier_nparams(n_params, a_x=None, a_y=None, a_date=None):
return n_params * [-np.inf], n_params * [np.inf]
def _f_init_bounds_fourier_yearly(a_x=None, a_y=None, a_date=None):
n_params = 2 * _dict_fourier_config['harmonics']
return _f_init_bounds_fourier_nparams(n_params, a_x, a_y, a_date)
model_season_fourier_yearly = ForecastModel(
name='season_fourier_yearly',
n_params=2 * _dict_fourier_config.get('harmonics'),
f_model=_f_model_yearly_season_fourier,
f_init_params=_f_init_params_fourier,
f_bounds=_f_init_bounds_fourier_yearly,
l_cache_vars='a_t_fourier'
)
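# Example (sketch): composing a trend and a seasonality component with '+',
# as described in the ForecastModel docstring. Parameter values are arbitrary
# placeholders; the composed model expects the concatenated parameter arrays.
#
# model_trend_season = model_linear + model_season_wday
# a_x = np.arange(28, dtype=float)
# a_date = pd.date_range('2020-01-06', periods=28, freq='D')
# params = np.concatenate([[0.5, 10.0],    # linear: slope A, intercept B
#                          np.zeros(6)])   # season_wday: Tue-Sun offsets (Mon is baseline)
# y_hat = model_trend_season(a_x, a_date, params)   # shape (28,)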
def get_fixed_model(forecast_model, params_fixed, is_mult=False):
# Generate model with some fixed parameters
if forecast_model.n_params == 0: # Nothing to do
return forecast_model
if len(params_fixed) != forecast_model.n_params:
err = 'Wrong number of fixed parameters'
raise ValueError(err)
return ForecastModel(
forecast_model.name + '_fixed', 0,
f_model=lambda a_x, a_date, params, is_mult=is_mult, **kwargs:
forecast_model.f_model(
a_x=a_x, a_date=a_date, params=params_fixed, is_mult=is_mult))
def get_iqr_thresholds(s_diff, low=0.25, high=0.75):
# Get thresholds based on inter quantile range
q1 = s_diff.quantile(low)
q3 = s_diff.quantile(high)
iqr = q3 - q1
thr_low = q1 - 1.5 * iqr
thr_hi = q3 + 1.5 * iqr
return thr_low, thr_hi
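# Example (sketch): IQR thresholds on a toy series of first differences.
#
# s_diff = pd.Series([1., 2., 3., 4., 100.])
# get_iqr_thresholds(s_diff)   # -> (-1.0, 7.0); the jump of 100 lies outside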
# TODO: Add option - estimate_outl_size
# TODO: Add option - sigmoid steps
# TODO: ADD option - gaussian spikes
def get_model_outliers(df, window=3):
"""
Identify outlier samples in a time series
:param df: Input time series
:type df: pandas.DataFrame
:param window: The x-axis window to aggregate multiple steps/spikes
:type window: int
:return:
| tuple (mask_step, mask_spike)
| mask_step: True if sample contains a step
| mask_spike: True if sample contains a spike
:rtype: tuple of 2 numpy arrays of booleans
TODO: require minimum number of samples to find an outlier
"""
dfo = df.copy() # dfo - df for outliers
# If df has datetime index, use date logic in steps/spikes
with_dates = 'date' in df.columns
x_col = 'date' if with_dates else 'x'
if df[x_col].duplicated().any():
raise ValueError('Input cannot have multiple values per sample')
# Get the differences
dfo['dif'] = dfo.y.diff()
# We consider as outliers the values that are
# 1.5 * IQR (interquartile range) beyond the quartiles.
# These thresholds are obtained here
thr_low, thr_hi = get_iqr_thresholds(dfo.dif)
# Now identify the changes
dfo['ischange'] = ((dfo.dif < thr_low) | (dfo.dif > thr_hi)).astype(int)
# Whenever there are two or more consecutive changes
# (that is, within `window` samples), we group them together
dfo['ischange_group'] = (
dfo.ischange.rolling(window, win_type=None, center=True).max().fillna(
0).astype(int)
)
# We now have to calculate the difference within the
# same group in order to identify if the consecutive changes
# result in a step, a spike, or both.
# We get the filtered difference
dfo['dif_filt'] = (dfo.dif * dfo.ischange).fillna(0)
# And the absolute value of that
dfo['dif_filt_abs'] = dfo.dif_filt.abs()
dfo['change_group'] = dfo.ischange_group.diff(
).abs().fillna(0).astype(int).cumsum()
# this gets us the average difference of the outliers within each change
# group
df_mean_gdiff = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group')[
'dif_filt'].mean().rename('mean_group_diff').reset_index())
# this gets us the average absolute difference of the outliers within each
# change group
df_mean_gdiff_abs = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group')[
'dif_filt_abs'].mean().rename(
'mean_group_diff_abs').reset_index()
)
# Merge the differences with the original dfo
dfo = dfo.merge(
df_mean_gdiff,
how='left').merge(
df_mean_gdiff_abs,
how='left')
# Fill missing values with zero -> no change
dfo.mean_group_diff = dfo.mean_group_diff.fillna(0)
dfo.mean_group_diff_abs = dfo.mean_group_diff_abs.fillna(0)
# the change group is a step if the mean_group_diff exceeds the thresholds
dfo['is_step'] = dfo['ischange_group'] & (
((dfo.mean_group_diff < thr_low) | (dfo.mean_group_diff > thr_hi)))
# the change group is a spike if the difference between the
# mean_group_diff_abs and the average mean_group_diff exceeds
# the average threshold value
dfo['is_spike'] = (dfo.mean_group_diff_abs -
dfo.mean_group_diff.abs()) > (thr_hi - thr_low) / 2
# Get the outlier start and end points for each group
df_outl = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group').apply(
lambda x: pd.Series(
{'outl_start': x[x_col].iloc[0],
'outl_end': x[x_col].iloc[-1]})).reset_index()
)
if df_outl.empty: # No outliers - nothing to do
return np.full(dfo.index.size, False), np.full(dfo.index.size, False)
dfo = dfo.merge(df_outl, how='left')
# Get the start and end points in dfo
if with_dates:
# Convert to datetime, if we are using dates
dfo['outl_start'] = pd.to_datetime(dfo.outl_start)
dfo['outl_end'] = pd.to_datetime(dfo.outl_end)
# Create the mask for spikes and steps
dfo['mask_spike'] = (dfo['is_spike'] &
(dfo.date >= pd.to_datetime(dfo.outl_start)) &
(dfo.date < pd.to_datetime(dfo.outl_end)))
dfo['mask_step'] = (dfo['is_step'] &
(dfo.date >= pd.to_datetime(dfo.outl_start)) &
(dfo.date <= pd.to_datetime(dfo.outl_end)))
else:
# For non-date x values, we fill na's and convert to int
dfo['outl_start'] = dfo.outl_start.fillna(0).astype(int)
dfo['outl_end'] = dfo.outl_end.fillna(0).astype(int)
# Create the mask for spikes and steps
dfo['mask_spike'] = (dfo['is_spike'] &
(dfo.x >= dfo.outl_start) &
(dfo.x < dfo.outl_end))
dfo['mask_step'] = (dfo['is_step'] &
(dfo.x >= dfo.outl_start) &
(dfo.x <= dfo.outl_end))
return dfo.mask_step.values, dfo.mask_spike.values
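# Example (sketch): flagging a single spike in a toy series without dates.
#
# df = pd.DataFrame({'x': np.arange(10),
#                    'y': [1., 1., 1., 9., 1., 1., 1., 1., 1., 1.]})
# mask_step, mask_spike = get_model_outliers(df)
# # mask_spike is True at x == 3; mask_step is all False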
def create_fixed_step(diff, x):
# Generate a fixed step model
fixed_params = [x, diff]
return get_fixed_model(model_step, fixed_params)
def create_fixed_spike(diff, x, duration):
# Generate a fixed spike model
fixed_params = [diff, x, x + duration]
return get_fixed_model(model_spike, fixed_params)
def create_fixed_spike_ignored(x, duration):
# Generate a fixed spike ignored model
fixed_params = [0, x, x + duration]
return get_fixed_model(model_spike, fixed_params, is_mult=True)
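# Example (sketch): outlier models with their parameters baked in; they expose
# n_params == 0, so the optimiser leaves them untouched when composed.
#
# step_fixed = create_fixed_step(diff=2.0, x=5)
# spike_fixed = create_fixed_spike(diff=3.0, x=10, duration=2)
# step_fixed.n_params, spike_fixed.n_params   # -> (0, 0)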
# Dummy variable models
def _validate_f_dummy(f_dummy):
# Ensures that behaviour of f_dummy matches specs
# Must return array of floats, same length as a_x, with values either 0.
# or 1.
def validate_for_dummy(a_dummy):
assert isinstance(a_dummy, np.ndarray)
assert np.setdiff1d(a_dummy, np.array([0., 1.])).size == 0
# validate_for_dummy(f_dummy(np.arange(0, 10), None)) # Crashes with
# f_dummy 's that require dates
validate_for_dummy(
f_dummy(
np.arange(
0, 10), pd.date_range(
'2018-01-01', '2018-01-10')))
def _get_f_dummy(dummy):
"""
Get a function that generates a mask array from a dummy variable
:param dummy: dummy variable, that can be used to generate a mask array
:type dummy: function, pandas Holiday/Calendar,
or list-like of numerics or dates
:return: model function based on dummy variable, to use on a ForecastModel
:rtype: function
"""
if callable(dummy): # If dummy is a function, use it
f_dummy = dummy
elif isinstance(dummy, Holiday):
f_dummy = _get_f_dummy_from_holiday(dummy)
elif isinstance(dummy, AbstractHolidayCalendar):
f_dummy = _get_f_dummy_from_calendar(dummy)
else:
# If dummy is a list, convert to function
f_dummy = _get_f_dummy_from_list(dummy)
return f_dummy
def _get_f_dummy_from_list(list_check):
"""
Generate a f_dummy function that defines a dummy variable, can be used
for dummy models
:param list_check: Input list
:type list_check: list-like of numerics or datetime-likes
:return: f_dummy
:rtype: function
"""
# Generate a f_dummy function that defines a dummy variable, can be used
# for dummy models
s_check = pd.Series(list_check)
assert s_check.size, 'Input list cannot be empty'
if pd.api.types.is_numeric_dtype(s_check):
list_check_numeric = s_check
def f_dummy_list_numeric(a_x, a_date):
# return a_x in check_numeric
return np.isin(a_x, list_check_numeric).astype(float)
return f_dummy_list_numeric
else:
try:
            list_check_date = pd.to_datetime(s_check)
# -*- coding: utf-8 -*-
"""
Evaluation of trained models.
"""
import io
import pathlib
from typing import Dict, List, Optional, Union, Tuple
from dateutil.relativedelta import relativedelta
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image, ImageOps
import seaborn as sns
import scipy.stats as scs
import sklearn.metrics as sklm
from perceptree.common.cache import update_dict_recursively
from perceptree.common.configuration import Config
from perceptree.common.configuration import Configurable
from perceptree.common.file_saver import FileSaver
from perceptree.common.graph_saver import GraphSaver
from perceptree.common.logger import Logger
from perceptree.common.util import parse_bool_string
from perceptree.common.util import reshape_scalar
from perceptree.data.predict import Prediction
from perceptree.model.base import BaseModel
class EvaluationProcessor(Logger, Configurable):
"""
Evaluation of trained models.
"""
COMMAND_NAME = "Evaluate"
""" Name of this command, used for configuration. """
def __init__(self, config: Config):
super().__init__(config=config)
self._set_instance()
self.__l.info("Initializing evaluation system...")
@classmethod
def register_options(cls, parser: Config.Parser):
""" Register configuration options for this class. """
parser.add_argument("--evaluate-current",
action="store_true",
default=False,
dest=cls._add_config_parameter("evaluate_current"),
help="Perform evaluation of all current model predictions?")
parser.add_argument("--export-evaluations",
action="store",
default=None, type=str,
metavar=("PATH/TO/EVALUATIONS.csv"),
dest=cls._add_config_parameter("export_evaluations"),
help="Export evaluations data-frame to given csv.")
parser.add_argument("--export-images",
action="store",
default=None, type=str,
metavar=("PATH/TO/OUTPUT/"),
dest=cls._add_config_parameter("export_images"),
help="Export annotated images with score bars to given location.")
parser.add_argument("--export-images-text",
action="store",
default=True, type=parse_bool_string,
metavar=("True/False"),
dest=cls._add_config_parameter("export_images_text"),
help="Draw text in annotated images?")
parser.add_argument("--export-images-side",
action="store",
default="l", type=str,
metavar=("r|l"),
dest=cls._add_config_parameter("export_images_side"),
help="Which side to display the bar on annotated images")
parser.add_argument("--export-images-transparent",
action="store",
default=False, type=parse_bool_string,
metavar=("True/False"),
dest=cls._add_config_parameter("export_images_transparent"),
help="Should bars on annotated images be transparent?")
parser.add_argument("--export-images-show",
action="store",
default=False, type=parse_bool_string,
metavar=("True/False"),
dest=cls._add_config_parameter("export_images_show"),
help="Display the annotated images through Matplotlib?")
def _evaluate_prediction_score(self, prediction: Prediction) -> [ dict ]:
""" Perform evaluation of tree scoring. """
if prediction.score_prediction is None:
self.__l.error("Failed to evaluate tree score, no prediction available!")
return [ ]
if prediction.score_expected is not None:
ground_truth_score = prediction.score_expected
ground_truth_available = True
else:
ground_truth_score = prediction.score_prediction
ground_truth_available = False
predicted_score = prediction.score_prediction
if len(ground_truth_score) != len(predicted_score) and len(ground_truth_score) == 1:
ground_truth_score = np.repeat(ground_truth_score, len(predicted_score))
if len(ground_truth_score) != len(predicted_score):
self.__l.error(f"Failed to evaluate tree score, GT and predicted don't match up "
f"({ground_truth_score} vs {predicted_score})!")
return [
{
"tree_id": prediction.tree_id,
"view_id": view_id,
"score_gt": reshape_scalar(gt["jod"])[0],
"score_p": reshape_scalar(pred["jod"])[0],
"mse": sklm.mean_squared_error([ gt["jod"] ], [ pred["jod"] ]),
"source": prediction.data_source,
"true_gt": ground_truth_available,
"tree_file": prediction.tree_file,
"view_files": prediction.tree_views[view_id] if view_id in prediction.tree_views else { },
}
for view_id in ground_truth_score.keys()
for gt, pred in [ ( ground_truth_score[view_id], predicted_score[view_id] ) ]
]
def _process_evaluations(self, evaluations: pd.DataFrame):
""" Process evaluation results. """
evaluations = evaluations.drop([ "tree_file", "view_files" ], axis=1)
if self.c.export_evaluations:
evaluations.to_csv(self.c.export_evaluations, sep=";")
evaluations_test = evaluations[evaluations["source"] != "train"]
evaluations_desc = { }
evaluation_stats = { }
evaluation_text = { }
for model_name in evaluations.model_name.unique():
model_eval = evaluations.loc[evaluations.model_name == model_name]
model_eval_test = evaluations_test.loc[evaluations_test.model_name == model_name]
evaluations_desc[model_name] = f"Description: \n" + str(
model_eval[["score_gt", "score_p", "mse"]].describe().to_string()
)
mse_a = evaluations["mse"].mean()
mse_t = evaluations_test["mse"].mean()
if len(model_eval) > 1:
pear_a = tuple(scs.pearsonr(model_eval["score_p"], model_eval["score_gt"]))
spea_a = tuple(scs.spearmanr(model_eval["score_p"], model_eval["score_gt"]))
else:
pear_a = tuple(( 1.0, 0.0 ))
spea_a = tuple(( 1.0, 0.0 ))
if len(model_eval_test) > 1:
pear_t = tuple(scs.pearsonr(model_eval_test["score_p"], model_eval_test["score_gt"]))
spea_t = tuple(scs.spearmanr(model_eval_test["score_p"], model_eval_test["score_gt"]))
else:
pear_t = tuple(( 1.0, 0.0 ))
spea_t = tuple(( 1.0, 0.0 ))
evaluation_stats[model_name] = \
f"Statistics: \n" \
f"\tMSE_a: {mse_a}\n" \
f"\tMSE_t: {mse_t}\n" \
f"\tPear_a: {pear_a}\n" \
f"\tPear_t: {pear_t}\n" \
f"\tSpea_a: {spea_a}\n" \
f"\tSpea_t: {spea_t}"
evaluation_text[model_name] = f"{evaluations_desc[model_name]}\n" \
f"{evaluation_stats[model_name]}"
model_evaluation_text = "\n".join([
f"Evaluation for model \"{model_name}\": \n" + model_text
for model_name, model_text in evaluation_text.items()
])
all_evaluation_text = f"{str(evaluations.to_string())}\n{model_evaluation_text}"
self.__l.info(f"Evaluation Results: \n{all_evaluation_text}")
FileSaver.save_string("EvaluationResults", all_evaluation_text)
FileSaver.save_csv("EvaluationResults", evaluations)
fig, ax = plt.subplots()
g = sns.lineplot(ax=ax, data=evaluations, x="score_gt", y="score_gt", color="red")
plt.setp(g.lines, alpha=0.6)
g = sns.lineplot(ax=ax, data=evaluations, x="score_gt", y="score_p")
plt.setp(g.lines, alpha=0.3)
all_sources = evaluations.source.unique()
source_order = [
source
for source in [ "train", "valid", "test", "external", "unknown" ]
if source in all_sources
]
g = sns.scatterplot(ax=ax, data=evaluations, x="score_gt", y="score_p", hue="source", hue_order=source_order)
logging_name = self.config["logging.logging_name"] or "Model"
g.set_title(
f"{logging_name} | MSE: {mse_a:.2} / {mse_t:.2} | "
f"PE: {pear_a[0]:.2} / {pear_t[0]:.2} | "
f"SP: {spea_a[0]:.2} / {spea_t[0]:.2}",
fontsize=8
)
GraphSaver.save_graph("EvaluationResults")
def _prepare_annotated_image(self, tree_id: tuple, view_id: tuple,
annotation: dict, do_text: bool, bar_side: str,
transparent: bool, show: bool) -> (Image, str):
""" Prepare annotated image from given specification. """
views = annotation["views"]
if "base" in views:
view_data = views["base"]
elif len(views) > 0:
view_data = views[list(views.keys())[0]]
else:
return None, None
full_path = view_data.description or None
if full_path is not None and pathlib.Path(full_path).exists():
view_image = Image.open(full_path)
name = pathlib.Path(full_path).with_suffix(".png").name
else:
full_path = None
view_image = Image.fromarray(view_data.data)
name = f"tree{tree_id[0]}_{tree_id[1]}_view_{view_id[0]}_{view_id[1]}.png"
data = [ ]
colors = [ ]
if "score_gt" in annotation:
data.append({
"name": "t",
"value": annotation["score_gt"]
})
colors.append("#e03a3a")
if "score_feature" in annotation:
data.append({
"name": "f",
"value": annotation["score_feature"]
})
colors.append("#88d962")
if "score_image" in annotation:
data.append({
"name": "i",
"value": annotation["score_image"]
})
colors.append("#127331")
if len(data) > 0:
data = pd.DataFrame(data=data)
colors = sns.set_palette(sns.color_palette(colors))
fig, ax = plt.subplots(figsize=(2, 16))
plt.ylim(-0.01, 6.0)
g = sns.barplot(ax=ax, x="name", y="value", data=data, orient="v", color=colors)
ax.tick_params(axis="both", which="both", length=0)
g.set(xticklabels=[ ])
g.set(xlabel=None)
g.set(yticklabels=[ ])
g.set(ylabel=None)
max_value = np.max([ row[ "value" ] for index, row in data.iterrows() ])
label_height = max_value + 0.01
if do_text:
for index, row in data.iterrows():
g.text(row.name, label_height, round(row[ "value" ], 2), color='black', ha="center", fontsize=30)
# Blue, red, green
bar_data = io.BytesIO()
fig.savefig(bar_data, format="png", transparent=transparent)
plt.close("all")
bar_image = Image.open(bar_data)
view_image = view_image.convert("RGBA")
bar_image_size = bar_image.size
tree_image_size = view_image.size
size_mult = tree_image_size[1] / bar_image_size[1]
new_bar_image_size = (int(bar_image_size[0] * size_mult), int(bar_image_size[1] * size_mult))
annotated_image = Image.new("RGB", tree_image_size, "WHITE")
bar_image = bar_image.resize(size=new_bar_image_size)
if bar_side == "r":
annotated_image.paste(view_image, (0, 0), view_image)
annotated_image.paste(bar_image, (view_image.size[0] - bar_image.size[0], 0), bar_image)
else:
annotated_image.paste(view_image, (0, 0), view_image)
annotated_image.paste(bar_image, (0, 0), bar_image)
if show:
plt.imshow(np.asarray(annotated_image))
plt.axis("off")
plt.show()
else:
annotated_image = view_image
return annotated_image, name
def _export_images(self, evaluations: pd.DataFrame,
output_path: Optional[str],
do_text: bool, bar_side: str,
transparent: bool, show: bool):
""" Export images with score bars if requested. """
if output_path is None:
return
output_path = pathlib.Path(output_path)
output_path.mkdir(parents=True, exist_ok=True)
annotations = { }
for idx, evaluation in evaluations.iterrows():
if evaluation.model_name.lower().startswith("image"):
model_type = "image"
elif evaluation.model_name.lower().startswith("feature"):
model_type = "feature"
else:
continue
new_data = { }
new_data[f"score_{model_type}"] = evaluation.score_p
if evaluation.true_gt:
new_data["score_gt"] = evaluation.score_gt
new_data["views"] = evaluation.view_files
update_dict_recursively(
annotations, {
( evaluation.tree_id, evaluation.view_id ): new_data
},
create_keys=True
)
for ( tree_id, view_id ), annotation in annotations.items():
if len(annotation["views"]) == 0:
continue
annotated_image, name = self._prepare_annotated_image(
tree_id=tree_id, view_id=view_id,
annotation=annotation,
do_text=do_text, bar_side=bar_side,
transparent=transparent,
show=show,
)
if annotated_image is None:
continue
annotated_path = output_path / name
annotated_image.save(annotated_path, "PNG")
def _evaluate_scores(self, predictions: Dict[str, Tuple[BaseModel, List[Prediction]]]) -> pd.DataFrame:
""" Perform evaluation of score precision on given predictions. """
evaluations = [ ]
for model_idx, (model_name, (model, model_predictions)) in enumerate(predictions.items()):
            self.__l.info(f"Evaluating model \"{model_name}\" ({model_idx + 1} / {len(predictions)})")
for prediction_idx, prediction in enumerate(model_predictions):
self.__l.info(f"\tEvaluation {prediction_idx + 1}/{len(model_predictions)}")
prediction_evaluations = self._evaluate_prediction_score(prediction=prediction)
prediction_evaluations = [
update_dict_recursively(evaluation, {
"model_idx": model_idx,
"model_name": model_name,
"eval_idx": idx,
}, create_keys=True)
for idx, evaluation in enumerate(prediction_evaluations)
]
evaluations += prediction_evaluations
self.__l.info(f"\t\tEvaluation {prediction_idx + 1}/{len(model_predictions)} Completed!")
        evaluations = pd.DataFrame(data=evaluations)
import os
from pathlib import Path
import flywheel
import numpy as np
import pandas as pd
import pytest
import sys
sys.path.append(str(Path(__file__).parents[2].resolve()))
from tests.BIDS_popup_curation.acquisitions import acquistions_object
from tests.BIDS_popup_curation.sessions import session_object
from utils.bids_pre_curate import (data2csv, handle_acquisitions,
handle_sessions, handle_subjects,
keep_specified_keys)
from utils.deep_dict import nested_get
test_data = [
{
'code': 'sub-13 test',
'firstname': None,
'_id': '5db0845e69d4f3002d16ee05',
'label': 'sub-13 test',
'lastname': None,
'parents': {
'group': 'scien',
'project': '5db0759469d4f3001f16e9c1',
'session': {
'session_no': 1,
'session_info': None
},
'subject': None},
'permissions': {
'perm-01': 'nate-has-access'
},
},
{
'code': 'sub-14',
'firstname': None,
'_id': '5db0845e69d4f3002d16ee05',
'label': 'sub-14',
'lastname': 'hello',
'parents': {
'group': 'Nate',
'project': '5db0759469d4f3001f16e9c1',
'session': {
'session_no': None,
'session_info': None
},
'subject': None
},
'permissions': {}
}
]
def test_data2csv_rename():
proj_label = 'test_proj'
keep_keys = ['label']
column_renames = ['existing_subject_label']
user_columns = ['new_subject_label']
remap = [0, 1]
allows = ['.-_', '._-', '._ -', '', '.', '-', '_']
expected = [['sub-13test', 'sub-14'],
['sub-13 test', 'sub-14'],
['sub-13 test', 'sub-14'],
['sub13test', 'sub14'],
['sub-13test', 'sub-14'],
['sub13test', 'sub14']]
def test_invalid_regex():
with pytest.raises(SystemExit):
path, df = data2csv(test_data,
proj_label,
keep_keys, 'sub', column_renames,
user_columns,
old_new_index=remap,
no_print=True,
allows=allows[0])
def test_valid_regex():
for allow, exp in zip(allows[1:], expected):
path, df = data2csv(test_data,
proj_label,
keep_keys, 'sub', column_renames,
user_columns,
old_new_index=remap,
no_print=True,
allows=allow)
            assert (df.columns == ['existing_subject_label', 'new_subject_label']).all()
            assert df.iloc[0, 1] == exp[0]
            assert df.iloc[1, 1] == exp[1]
def test_data2csv_dummy_data():
proj_label = 'test_proj'
keep_keys = [['label'],
['label', ['parents', 'group']],
['label', ['parents', 'subject'], 'label']]
column_renames = [['existing_acquisition_label'],
['existing_session_label', 'subject_group'],
['existing_session_label', 'subject_label']]
remap = [[0, 1], [0, 2], [0, 2]]
prefixes = ['acq', 'sub', 'sess']
user_columns = [['new_session_label'], ['new_session_label'], ['new_session_label', 'test2']]
for keep_key, column_rename, prefix, user_column, remap in zip(keep_keys, column_renames, prefixes, user_columns,
remap):
path, df = data2csv(test_data, proj_label, keep_key, prefix, '[^A-Za-z0-9]', column_rename, user_column, old_new_index=remap,
no_print=True)
assert (df.columns == [*column_rename, *user_column]).all()
assert df['new_session_label'][0] == 'sub13test'
assert df['new_session_label'][1] == 'sub14'
def test_data2csv_acq_duplicate(group='scien', project='Nate-BIDS-pre-curate'):
fw = flywheel.Client()
proj = fw.lookup(f'{group}/{project}')
acqs = [acq.to_dict() for acq in fw.get_project_acquisitions(proj.id)]
path, df = data2csv(acqs, project,
keep_keys=['label'],
prefix='acquisition_labels',
regex='[^A-Za-z0-9]',
column_rename=['existing_acquisition_label'],
user_columns=['new_acquisition_label', 'modality', 'task', 'run', 'ignore'],
unique=['label'], no_print=True)
supposedly_unique = np.sort(df['existing_acquisition_label'].values)
    unique = np.unique(pd.DataFrame.from_records(acquistions_object))
import requests
import pandas as pd
import numpy as np
from credential import API_KEY
target_dir = '../csv_data/'
movies = pd.read_csv(f'{target_dir}movies.csv')
df_genres = pd.read_csv(f'{target_dir}genres.csv')
df_genre_info = pd.read_csv(f'{target_dir}genre_info.csv')
df_companies = pd.read_csv(f'{target_dir}companies.csv')
df_company_info = pd.read_csv(f'{target_dir}company_info.csv')
df_countries = pd.read_csv(f'{target_dir}countries.csv')
df_country_info = pd.read_csv(f'{target_dir}country_info.csv')
df_spoken_languages = pd.read_csv(f'{target_dir}spoken_languages.csv')
import logging
import math
import pandas as pd
from sklearn.model_selection import train_test_split
from .tokenizer import WordTokenizer
from .utils import load_obj
from typing import Dict, Optional
from overrides import overrides
from nltk.tree import Tree
from allennlp.common.file_utils import cached_path
from allennlp.common.checks import ConfigurationError
from allennlp.data import DatasetReader, Instance, Vocabulary
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.fields import LabelField, TextField, Field
logger = logging.getLogger(__name__)
@DatasetReader.register("CR")
class SentimentDatasetReader(DatasetReader):
def __init__(
self,
dataset_params: Dict,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._indexers = {
"tokens": PretrainedTransformerIndexer(
dataset_params["transformer_model_name"]
)
}
self.transformer_tokenizer = PretrainedTransformerTokenizer(
dataset_params["transformer_model_name"]
)
self.transformer_vocab = Vocabulary.from_pretrained_transformer(
dataset_params["transformer_model_name"]
)
self.detokenizer = WordTokenizer()
self.max_length = dataset_params["max_length"]
@overrides
def _read(self, file_path):
corpus = pd.read_csv(
file_path
)
reviews, labels = list(corpus.sentence), list(corpus.label)
for review, label in zip(reviews, labels):
if type(review) != str:
if math.isnan(review):
review = "."
instance = self.text_to_instance(review, str(label))
if instance is not None:
yield instance
else:
pass
@overrides
def text_to_instance(
self,
text: str,
sentiment: str = None
) -> Optional[Instance]:
tokens = self.transformer_tokenizer.tokenize(text)
text_field = TextField(
tokens,
token_indexers=self._indexers
)
fields: Dict[str, Field] = {
"text": text_field
}
if sentiment is not None:
fields["label"] = LabelField(sentiment)
else:
pass
return Instance(fields)
def get_token_indexers(self):
        return self._indexers
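# Example (sketch): loading a CSV of labelled sentences with the reader
# registered as "CR" above. The transformer name and path are placeholders;
# the CSV is expected to have 'sentence' and 'label' columns.
#
# reader = SentimentDatasetReader(
#     dataset_params={"transformer_model_name": "bert-base-uncased",
#                     "max_length": 128})
# instances = list(reader.read("data/CR/train.csv"))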
@DatasetReader.register("sst_tokens")
class StanfordSentimentTreeBankDatasetReader(DatasetReader):
"""
Reads tokens and their sentiment labels from the Stanford Sentiment Treebank.
The Stanford Sentiment Treebank comes with labels
from 0 to 4. `"5-class"` uses these labels as is. `"3-class"` converts the
problem into one of identifying whether a sentence is negative, positive, or
neutral sentiment. In this case, 0 and 1 are grouped as label 0 (negative sentiment),
2 is converted to label 1 (neutral sentiment) and 3 and 4 are grouped as label 2
(positive sentiment). `"2-class"` turns it into a binary classification problem
between positive and negative sentiment. 0 and 1 are grouped as the label 0
(negative sentiment), 2 (neutral) is discarded, and 3 and 4 are grouped as the label 1
(positive sentiment).
Expected format for each input line: a linearized tree, where nodes are labeled
by their sentiment.
The output of `read` is a list of `Instance` s with the fields:
tokens : `TextField` and
label : `LabelField`
Registered as a `DatasetReader` with name "sst_tokens".
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
use_subtrees : `bool`, optional, (default = `False`)
Whether or not to use sentiment-tagged subtrees.
granularity : `str`, optional (default = `"5-class"`)
One of `"5-class"`, `"3-class"`, or `"2-class"`, indicating the number
of sentiment labels to use.
"""
def __init__(
self,
sst_params: Dict,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._indexers = {
"tokens": PretrainedTransformerIndexer(
sst_params["transformer_model_name"]
)
}
self.transformer_tokenizer = PretrainedTransformerTokenizer(
sst_params["transformer_model_name"]
)
self.transformer_vocab = Vocabulary.from_pretrained_transformer(
sst_params["transformer_model_name"]
)
self._use_subtrees = sst_params["use_subtrees"]
self.detokenizer = WordTokenizer()
self.max_length = sst_params["max_length"]
self.robust_test = False
if sst_params["noise_datapath"] != "none":
self.noise_data = load_obj(sst_params["noise_datapath"])
else:
self.noise_data = None
allowed_granularities = ["5-class", "3-class", "2-class"]
if sst_params["granularity"] not in allowed_granularities:
raise ConfigurationError(
"granularity is {}, but expected one of: {}".format(
sst_params["granularity"], allowed_granularities
)
)
self._granularity = sst_params["granularity"]
@overrides
def _read(self, file_path):
sentences = []
labels = []
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for idx, line in enumerate(data_file.readlines()):
line = line.strip("\n")
if not line:
continue
parsed_line = Tree.fromstring(line)
if self._use_subtrees:
for subtree in parsed_line.subtrees():
instance = self.text_to_instance(
self.detokenizer.detokenize(subtree.leaves()),
subtree.label()
)
if instance is not None:
yield instance
else:
if self.robust_test is True and self.noise_data is not None:
text = self.noise_data[idx]
else:
text = self.detokenizer.detokenize(parsed_line.leaves())
instance = self.text_to_instance(
text,
parsed_line.label()
)
if self._granularity == "2-class":
label = int(parsed_line.label())
if label > 2:
label = "1"
sentences.append(text)
labels.append(label)
elif label < 2:
label = "0"
sentences.append(text)
labels.append(label)
else:
pass
else:
sentences.append(text)
labels.append(parsed_line.label())
if instance is not None:
yield instance
input_df = pd.DataFrame({"sentence": sentences, "label": labels}, dtype=object)
input_df.to_csv(file_path+".csv", index=False)
@overrides
def text_to_instance(
self,
text: str,
sentiment: str = None
) -> Optional[Instance]:
tokens = self.transformer_tokenizer.tokenize(text)
text_field = TextField(
tokens[:self.max_length],
token_indexers=self._indexers
)
fields: Dict[str, Field] = {
"text": text_field
}
if sentiment is not None:
if self._granularity == "3-class":
if int(sentiment) < 2:
sentiment = "0"
elif int(sentiment) == 2:
sentiment = "1"
else:
sentiment = "2"
elif self._granularity == "2-class":
if int(sentiment) < 2:
sentiment = "0"
elif int(sentiment) == 2:
return None
else:
sentiment = "1"
fields["label"] = LabelField(sentiment)
else:
pass
return Instance(fields)
def get_token_indexers(self):
return self._indexers
def get_sst_ds(
sst_params: Dict,
train_data_path="data/sst/train.txt",
valid_data_path="data/sst/dev.txt",
test_data_path="data/sst/test.txt",
):
sst_dataset_reader = StanfordSentimentTreeBankDatasetReader(
sst_params
)
train_ds = sst_dataset_reader.read(train_data_path)
if sst_params["proportion"] != 1:
import random
random.seed(2003)
train_ds.instances = random.sample(
train_ds.instances,
int(len(train_ds.instances) * sst_params["proportion"])
)
else:
pass
valid_ds = sst_dataset_reader.read(valid_data_path)
sst_dataset_reader.robust_test = True
test_ds = sst_dataset_reader.read(test_data_path)
return train_ds, valid_ds, test_ds, sst_dataset_reader
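# Example usage (illustrative sketch): sst_params must supply every key the reader touches
# above; the values below are placeholders.
#
#     sst_params = {
#         "transformer_model_name": "bert-base-uncased",
#         "use_subtrees": False,
#         "max_length": 128,
#         "noise_datapath": "none",
#         "granularity": "2-class",
#         "proportion": 1,
#     }
#     train_ds, valid_ds, test_ds, reader = get_sst_ds(sst_params)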
def split_dataset(
all_data_path: str
):
corpus = pd.read_pickle(all_data_path)
"""
This module contains a collection of functions which make plots (saved as png files) using matplotlib, generated from
some model fits and cross-validation evaluation within a MAST-ML run.
This module also contains a method to create python notebooks containing plotted data and the relevant source code from
this module, to enable the user to make their own modifications to the created plots in a straightforward way (useful for
tweaking plots for a presentation or publication).
"""
import math
import statistics
import os
import copy
import pandas as pd
import itertools
import warnings
import logging
from collections.abc import Iterable
from os.path import join
from collections import OrderedDict
from math import log, floor, ceil
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.ensemble._forest import _generate_sample_indices, _get_n_samples_bootstrap
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Ignore the harmless warning about the gelsd driver on mac.
warnings.filterwarnings(action="ignore", module="scipy",
message="^internal gelsd")
# Ignore matplotlib deprecation warning (set as all warnings for now)
warnings.filterwarnings(action="ignore")
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve, auc, precision_recall_curve
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure, figaspect
from matplotlib.animation import FuncAnimation
from matplotlib.font_manager import FontProperties
from scipy.stats import gaussian_kde, norm
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
# Needed imports for ipynb_maker
#from mastml.utils import nice_range
#from mastml.metrics import nice_names
import inspect
import textwrap
from pandas import DataFrame, Series
import nbformat
from functools import wraps
import forestci as fci
from forestci.calibration import calibrateEB
import copy
matplotlib.rc('font', size=18, family='sans-serif') # set all font to bigger
matplotlib.rc('figure', autolayout=True) # turn on autolayout
# adding dpi as a constant global so it can be changed later
DPI = 250
#logger = logging.getLogger() # only used inside ipynb_maker I guess
# HEADERENDER don't delete this line, it's used by ipynb maker
logger = logging.getLogger('mastml') # the real logger
def ipynb_maker(plot_func):
"""
This method creates Jupyter Notebooks so user can modify and regenerate the plots produced by MAST-ML.
Args:
plot_func: (plot_helper method), a plotting method contained in plot_helper.py which contains the
@ipynb_maker decorator
Returns:
(plot_helper method), the same plot_func as used as input, but after having written the Jupyter notebook with source code to create plot
"""
from mastml import plot_helper # Strange self-import but it works, as had cyclic import issues with ipynb_maker as its own module
@wraps(plot_func)
def wrapper(*args, **kwargs):
# convert everything to kwargs for easier display
# from geniuses at https://stackoverflow.com/a/831164
#kwargs.update(dict(zip(plot_func.func_code.co_varnames, args)))
sig = inspect.signature(plot_func)
binding = sig.bind(*args, **kwargs)
all_args = binding.arguments
# if this is an outdir style function, fill in savepath and delete outdir
if 'savepath' in all_args:
ipynb_savepath = all_args['savepath']
knows_savepath = True
basename = os.path.basename(ipynb_savepath) # fix absolute path problem
elif 'outdir' in all_args:
knows_savepath = False
basename = plot_func.__name__
ipynb_savepath = os.path.join(all_args['outdir'], basename)
else:
raise Exception('you must have an "outdir" or "savepath" argument to use ipynb_maker')
readme = textwrap.dedent(f"""\
This notebook was automatically generated from your MAST-ML run so you can recreate the
plots. Some things are a bit different from the usual way of creating plots - we are
using the [object oriented
interface](https://matplotlib.org/tutorials/introductory/lifecycle.html) instead of
pyplot to create the `fig` and `ax` instances.
""")
# get source of the top of plot_helper.py
header = ""
with open(plot_helper.__file__) as f:
for line in f.readlines():
if 'HEADERENDER' in line:
break
header += line
core_funcs = [plot_helper.stat_to_string, plot_helper.plot_stats, plot_helper.make_fig_ax,
plot_helper.get_histogram_bins, plot_helper.nice_names, plot_helper.nice_range,
plot_helper.nice_mean, plot_helper.nice_std, plot_helper.rounder, plot_helper._set_tick_labels,
plot_helper._set_tick_labels_different, plot_helper._nice_range_helper, plot_helper._nearest_pow_ten,
plot_helper._three_sigfigs, plot_helper._n_sigfigs, plot_helper._int_if_int, plot_helper._round_up,
plot_helper.prediction_intervals]
func_strings = '\n\n'.join(inspect.getsource(func) for func in core_funcs)
plot_func_string = inspect.getsource(plot_func)
# remove first line that has this decorator on it (!!!)
plot_func_string = '\n'.join(plot_func_string.split('\n')[1:])
# put the arguments and their values in the code
arg_assignments = []
arg_names = []
for key, var in all_args.items():
if isinstance(var, DataFrame):
# this is amazing
arg_assignments.append(f"{key} = pd.read_csv(StringIO('''\n{var.to_csv(index=False)}'''))")
elif isinstance(var, Series):
arg_assignments.append(f"{key} = pd.Series(pd.read_csv(StringIO('''\n{var.to_csv(index=False)}''')).iloc[:,0])")
else:
arg_assignments.append(f'{key} = {repr(var)}')
arg_names.append(key)
args_block = ("from numpy import array\n" +
"from collections import OrderedDict\n" +
"from io import StringIO\n" +
"from sklearn.gaussian_process import GaussianProcessRegressor # Need for error plots\n" +
"from sklearn.gaussian_process.kernels import * # Need for error plots\n" +
"from sklearn.ensemble import RandomForestRegressor # Need for error plots\n" +
'\n'.join(arg_assignments))
arg_names = ', '.join(arg_names)
if knows_savepath:
if '.png' not in basename:
basename += '.png'
main = textwrap.dedent(f"""\
import pandas as pd
from IPython.display import Image, display
{plot_func.__name__}({arg_names})
display(Image(filename='{basename}'))
""")
else:
main = textwrap.dedent(f"""\
import pandas as pd
from IPython.display import Image, display
plot_paths = plot_predicted_vs_true(train_quad, test_quad, outdir, label)
for plot_path in plot_paths:
display(Image(filename=plot_path))
""")
nb = nbformat.v4.new_notebook()
readme_cell = nbformat.v4.new_markdown_cell(readme)
text_cells = [header, func_strings, plot_func_string, args_block, main]
cells = [readme_cell] + [nbformat.v4.new_code_cell(cell_text) for cell_text in text_cells]
nb['cells'] = cells
nbformat.write(nb, ipynb_savepath + '.ipynb')
return plot_func(*args, **kwargs)
return wrapper
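# Example usage (sketch): any plotting function that takes either a 'savepath' or an 'outdir'
# argument can be wrapped so a companion .ipynb with the plotting source is written next to
# the saved figure. The function body below is a placeholder.
#
#     @ipynb_maker
#     def plot_something(x, y, savepath):
#         fig, ax = make_fig_ax()
#         ax.scatter(x, y)
#         fig.savefig(savepath, dpi=DPI, bbox_inches='tight')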
def make_train_test_plots(run, path, is_classification, label, model, train_X, test_X, groups=None):
"""
General plotting method used to execute sequence of specific plots of train-test data analysis
Args:
run: (dict), a particular split_result from masml_driver
path: (str), path to save the generated plots and analysis of split_result designated in 'run'
is_classification: (bool), whether or not the analysis is a classification task
label: (str), name of the y data variable being fit
model: (scikit-learn model object), a scikit-learn model/estimator
train_X: (numpy array), array of X features used in training
test_X: (numpy array), array of X features used in testing
groups: (numpy array), array of group names
Returns:
None
"""
y_train_true, y_train_pred, y_test_true = \
run['y_train_true'], run['y_train_pred'], run['y_test_true']
y_test_pred, train_metrics, test_metrics = \
run['y_test_pred'], run['train_metrics'], run['test_metrics']
train_groups, test_groups = run['train_groups'], run['test_groups']
if is_classification:
# Need these class prediction probabilities for ROC curve analysis
y_train_pred_proba = run['y_train_pred_proba']
y_test_pred_proba = run['y_test_pred_proba']
title = 'train_confusion_matrix'
plot_confusion_matrix(y_train_true, y_train_pred,
join(path, title+'.png'), train_metrics,
title=title)
title = 'test_confusion_matrix'
plot_confusion_matrix(y_test_true, y_test_pred,
join(path, title+'.png'), test_metrics,
title=title)
title = 'train_roc_curve'
plot_roc_curve(y_train_true, y_train_pred_proba, join(path, title+'.png'))
title = 'test_roc_curve'
plot_roc_curve(y_test_true, y_test_pred_proba, join(path, title+'.png'))
title = 'train_precision_recall_curve'
plot_precision_recall_curve(y_train_true, y_train_pred_proba, join(path, title+'.png'))
title = 'test_precision_recall_curve'
plot_precision_recall_curve(y_test_true, y_test_pred_proba, join(path, title + '.png'))
else: # is_regression
plot_predicted_vs_true((y_train_true, y_train_pred, train_metrics, train_groups),
(y_test_true, y_test_pred, test_metrics, test_groups),
path, label=label)
title = 'train_residuals_histogram'
plot_residuals_histogram(y_train_true, y_train_pred,
join(path, title+'.png'), train_metrics,
title=title, label=label)
title = 'test_residuals_histogram'
plot_residuals_histogram(y_test_true, y_test_pred,
join(path, title+'.png'), test_metrics,
title=title, label=label)
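# Example usage (sketch): 'run' is one split_result dict from mastml_driver; for a regression
# split it needs at least the keys unpacked above. The values shown here are placeholders.
#
#     run = {'y_train_true': y_tr, 'y_train_pred': y_tr_hat,
#            'y_test_true': y_te, 'y_test_pred': y_te_hat,
#            'train_metrics': {'root_mean_squared_error': 0.12},
#            'test_metrics': {'root_mean_squared_error': 0.21},
#            'train_groups': None, 'test_groups': None}
#     make_train_test_plots(run, path='results/split_0', is_classification=False,
#                           label='target', model=model, train_X=X_train, test_X=X_test)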
def make_error_plots(run, path, is_classification, label, model, train_X, test_X, rf_error_method, rf_error_percentile,
is_validation, validation_column_name, validation_X, groups=None):
y_train_true, y_train_pred, y_test_true = \
run['y_train_true'], run['y_train_pred'], run['y_test_true']
y_test_pred, train_metrics, test_metrics = \
run['y_test_pred'], run['train_metrics'], run['test_metrics']
train_groups, test_groups = run['train_groups'], run['test_groups']
if is_validation:
y_validation_pred, y_validation_true, prediction_metrics = \
run['y_validation_pred'+'_'+str(validation_column_name)], \
run['y_validation_true'+'_'+str(validation_column_name)], \
run['prediction_metrics']
if is_classification:
logger.debug('There is no error distribution plotting for classification problems, just passing through...')
else: # is_regression
#title = 'train_normalized_error'
#plot_normalized_error(y_train_true, y_train_pred, join(path, title+'.png'), model, error_method, percentile,
# X=train_X, Xtrain=train_X, Xtest=test_X)
title = 'test_normalized_error'
plot_normalized_error(y_test_true, y_test_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=test_X, Xtrain=train_X, Xtest=test_X)
#title = 'train_cumulative_normalized_error'
#plot_cumulative_normalized_error(y_train_true, y_train_pred, join(path, title+'.png'), model, error_method,
# percentile, X=train_X, Xtrain=train_X, Xtest=test_X)
title = 'test_cumulative_normalized_error'
plot_cumulative_normalized_error(y_test_true, y_test_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=test_X, Xtrain=train_X, Xtest=test_X)
# HERE, add your RMS residual vs. error plot function
if model.__class__.__name__ in ['RandomForestRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor',
'GradientBoostingRegressor', 'EnsembleRegressor']:
y_all_data = np.concatenate([y_test_true, y_train_true])
plot_real_vs_predicted_error(y_all_data, path, model, data_test_type='test')
if is_validation:
title = 'validation_cumulative_normalized_error'
plot_cumulative_normalized_error(y_validation_true, y_validation_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=validation_X, Xtrain=train_X, Xtest=test_X)
title = 'validation_normalized_error'
plot_normalized_error(y_validation_true, y_validation_pred, join(path, title + '.png'), model, rf_error_method,
rf_error_percentile, X=validation_X, Xtrain=train_X, Xtest=test_X)
if model.__class__.__name__ in ['RandomForestRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor',
'GradientBoostingRegressor', 'EnsembleRegressor']:
y_all_data = np.concatenate([y_test_true, y_train_true])
plot_real_vs_predicted_error(y_all_data, path, model, data_test_type='validation')
@ipynb_maker
def plot_confusion_matrix(y_true, y_pred, savepath, stats, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
Method used to generate a confusion matrix for a classification run. Additional information can be found
at: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted confusion matrix
stats: (dict), dict of training or testing statistics for a particular run
normalize: (bool), whether or not to normalize data output as truncated float vs. double
title: (str), title of the confusion matrix plot
cmap: (matplotlib colormap), the color map to use for confusion matrix plotting
Returns:
None
"""
# calculate confusion matrix and lables in correct order
cm = confusion_matrix(y_true, y_pred)
#classes = sorted(list(set(y_true).intersection(set(y_pred))))
classes = sorted(list(set(y_true).union(set(y_pred))))
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
#ax.set_title(title)
# create the colorbar, not really needed but everyones got 'em
mappable = ax.imshow(cm, interpolation='nearest', cmap=cmap)
#fig.colorbar(mappable)
# set x and y ticks to labels
tick_marks = range(len(classes))
ax.set_xticks(tick_marks)
ax.set_xticklabels(classes, rotation='horizontal', fontsize=18)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes, rotation='horizontal', fontsize=18)
# draw number in the boxes
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
# plots the stats
plot_stats(fig, stats, x_align=0.60, y_align=0.90)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
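# Example usage (sketch): y values are class labels and 'stats' is a dict of metric
# name -> value pairs rendered beside the matrix. The values below are placeholders.
#
#     plot_confusion_matrix(y_true=[0, 1, 1, 0], y_pred=[0, 1, 0, 0],
#                           savepath='confusion_matrix.png', stats={'accuracy': 0.75})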
@ipynb_maker
def plot_roc_curve(y_true, y_pred, savepath):
"""
Method to calculate and plot the receiver-operator characteristic curve for classification model results
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted ROC curve
Returns:
None
"""
#TODO: have work when probability=False in model params. Suggest user set probability=True!!
#classes = sorted(list(set(y_true).union(set(y_pred))))
#n_classes = y_pred.shape[1]
classes = list(np.unique(y_true))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(len(classes)):
fpr[i], tpr[i], _ = roc_curve(y_true, y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
x_align = 0.95
fig, ax = make_fig_ax(aspect_ratio=0.66, x_align=x_align, left=0.15)
colors = ['blue', 'red']
#for i in range(len(classes)):
# ax.plot(fpr[i], tpr[i], color=colors[i], lw=2, label='ROC curve class '+str(i)+' (area = %0.2f)' % roc_auc[i])
ax.plot(fpr[1], tpr[1], color=colors[0], lw=2, label='ROC curve' + ' (area = %0.2f)' % roc_auc[1])
ax.plot([0, 1], [0, 1], color='black', label='Random guess', lw=2, linestyle='--')
ax.set_xticks(np.linspace(0, 1, 5))
ax.set_yticks(np.linspace(0, 1, 5))
_set_tick_labels(ax, maxx=1, minn=0)
ax.set_xlabel('False Positive Rate', fontsize='16')
ax.set_ylabel('True Positive Rate', fontsize='16')
ax.legend(loc="lower right", fontsize=12)
#plot_stats(fig, stats, x_align=0.60, y_align=0.90)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_precision_recall_curve(y_true, y_pred, savepath):
"""
Method to calculate and plot the precision-recall curve for classification model results
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted precision-recall curve
Returns:
None
"""
# Note this only works with probability predictions of the classifier labels.
classes = list(np.unique(y_true))
precision = dict()
recall = dict()
#roc_auc = dict()
for i in range(len(classes)):
precision[i], recall[i], _ = precision_recall_curve(y_true, y_pred[:, i])
x_align = 0.95
fig, ax = make_fig_ax(aspect_ratio=0.66, x_align=x_align, left=0.15)
colors = ['blue', 'red']
#for i in range(len(classes)):
# ax.plot(fpr[i], tpr[i], color=colors[i], lw=2, label='ROC curve class '+str(i)+' (area = %0.2f)' % roc_auc[i])
ax.step(recall[1], precision[1], color=colors[0], lw=2, label='Precision-recall curve')
#ax.fill_between(recall[1], precision[1], alpha=0.4, color=colors[0])
ax.set_xticks(np.linspace(0, 1, 5))
ax.set_yticks(np.linspace(0, 1, 5))
_set_tick_labels(ax, maxx=1, minn=0)
ax.set_xlabel('Recall', fontsize='16')
ax.set_ylabel('Precision', fontsize='16')
ax.legend(loc="upper right", fontsize=12)
#plot_stats(fig, stats, x_align=0.60, y_align=0.90)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_residuals_histogram(y_true, y_pred, savepath,
stats, title='residuals histogram', label='residuals'):
"""
Method to calculate and plot the histogram of residuals from regression model
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted precision-recall curve
stats: (dict), dict of training or testing statistics for a particular run
title: (str), title of residuals histogram
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
#ax.set_title(title)
# do the actual plotting
residuals = y_true - y_pred
#Output residuals data and stats to spreadsheet
path = os.path.dirname(savepath)
pd.DataFrame(residuals).describe().to_csv(os.path.join(path,'residual_statistics.csv'))
pd.DataFrame(residuals).to_csv(path+'/'+'residuals.csv')
#Get num_bins using smarter method
num_bins = get_histogram_bins(y_df=residuals)
ax.hist(residuals, bins=num_bins, color='b', edgecolor='k')
# normal text stuff
ax.set_xlabel('Value of '+label, fontsize=16)
ax.set_ylabel('Number of occurences', fontsize=16)
# make y axis ints, because it is discrete
#ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plot_stats(fig, stats, x_align=x_align, y_align=0.90)
plot_stats(fig, pd.DataFrame(residuals).describe().to_dict()[0], x_align=x_align, y_align=0.60)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_target_histogram(y_df, savepath, title='target histogram', label='target values'):
"""
Method to plot the histogram of true y values
Args:
y_df: (pandas dataframe), dataframe of true y data values
savepath: (str), path to save the plotted precision-recall curve
title: (str), title of residuals histogram
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.70
fig, ax = make_fig_ax(aspect_ratio=0.5, x_align=x_align)
#ax.set_title(title)
#Get num_bins using smarter method
num_bins = get_histogram_bins(y_df=y_df)
# do the actual plotting
try:
ax.hist(y_df, bins=num_bins, color='b', edgecolor='k')#, histtype='stepfilled')
except:
print('Could not plot target histogram')
return
# normal text stuff
ax.set_xlabel('Value of '+label, fontsize=16)
ax.set_ylabel('Number of occurences', fontsize=16)
# make y axis ints, because it is discrete
#ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plot_stats(fig, dict(y_df.describe()), x_align=x_align, y_align=0.90, fontsize=14)
# Save input data stats to csv
savepath_parse = savepath.split('target_histogram.png')[0]
y_df.describe().to_csv(savepath_parse+'/input_data_statistics.csv')
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_predicted_vs_true(train_quad, test_quad, outdir, label):
"""
Method to create a parity plot (predicted vs. true values)
Args:
train_quad: (tuple), tuple containing 4 numpy arrays: true training y data, predicted training y data,
training metric data, and groups used in training
test_quad: (tuple), tuple containing 4 numpy arrays: true test y data, predicted test y data,
testing metric data, and groups used in testing
outdir: (str), path to save plots to
label: (str), label used for axis labeling
Returns:
None
"""
filenames = list()
y_train_true, y_train_pred, train_metrics, train_groups = train_quad
y_test_true, y_test_pred, test_metrics, test_groups = test_quad
# make diagonal line from absolute min to absolute max of any data point
# using round because Ryan did - but won't that ruin small numbers??? TODO this
#max1 = max(y_train_true.max(), y_train_pred.max(),
# y_test_true.max(), y_test_pred.max())
max1 = max(y_train_true.max(), y_test_true.max())
#min1 = min(y_train_true.min(), y_train_pred.min(),
# y_test_true.min(), y_test_pred.min())
min1 = min(y_train_true.min(), y_test_true.min())
max1 = round(float(max1), rounder(max1-min1))
min1 = round(float(min1), rounder(max1-min1))
for y_true, y_pred, stats, groups, title_addon in \
(train_quad+('train',), test_quad+('test',)):
# make fig and ax, use x_align when placing text so things don't overlap
x_align=0.64
fig, ax = make_fig_ax(x_align=x_align)
# set tick labels
# notice that we use the same max and min for all three. Don't
# calculate those inside the loop, because all the should be on the same scale and axis
_set_tick_labels(ax, max1, min1)
# plot diagonal line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# do the actual plotting
if groups is None:
ax.scatter(y_true, y_pred, color='blue', edgecolors='black', s=100, zorder=2, alpha=0.7)
else:
handles = dict()
unique_groups = np.unique(np.concatenate((train_groups, test_groups), axis=0))
unique_groups_train = np.unique(train_groups)
unique_groups_test = np.unique(test_groups)
#logger.debug(' '*12 + 'unique groups: ' +str(list(unique_groups)))
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
for groupcount, group in enumerate(unique_groups):
mask = groups == group
#logger.debug(' '*12 + f'{group} group_percent = {np.count_nonzero(mask) / len(groups)}')
handles[group] = ax.scatter(y_true[mask], y_pred[mask], label=group, color=colors[colorcount],
marker=markers[markercount], s=100, alpha=0.7)
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
if title_addon == 'train':
to_delete = [k for k in handles.keys() if k not in unique_groups_train]
for k in to_delete:
del handles[k]
elif title_addon == 'test':
to_delete = [k for k in handles.keys() if k not in unique_groups_test]
for k in to_delete:
del handles[k]
ax.legend(handles.values(), handles.keys(), loc='lower right', fontsize=12)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
plot_stats(fig, stats, x_align=x_align, y_align=0.90)
filename = 'predicted_vs_true_'+ title_addon + '.png'
filenames.append(filename)
fig.savefig(join(outdir, filename), dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y_pred': y_pred, 'y_true': y_true})
df.to_csv(join(outdir, 'predicted_vs_true_' + title_addon + '.csv'))
return filenames
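# Example usage (sketch): each quad is (y_true, y_pred, metrics_dict, groups); pass None for
# groups when no grouping is used. Arrays and metric values are placeholders.
#
#     train_quad = (y_train_true, y_train_pred, {'R2': 0.91}, None)
#     test_quad = (y_test_true, y_test_pred, {'R2': 0.85}, None)
#     plot_predicted_vs_true(train_quad, test_quad, outdir='results', label='band gap (eV)')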
def plot_scatter(x, y, savepath, groups=None, xlabel='x', label='target data'):
"""
Method to create a general scatter plot
Args:
x: (numpy array), array of x data
y: (numpy array), array of y data
savepath: (str), path to save plots to
groups: (list), list of group labels
xlabel: (str), label used for x-axis labeling
label: (str), label used for y-axis labeling
Returns:
None
"""
# Set image aspect ratio:
fig, ax = make_fig_ax()
# set tick labels
max_tick_x = max(x)
min_tick_x = min(x)
max_tick_y = max(y)
min_tick_y = min(y)
max_tick_x = round(float(max_tick_x), rounder(max_tick_x-min_tick_x))
min_tick_x = round(float(min_tick_x), rounder(max_tick_x-min_tick_x))
max_tick_y = round(float(max_tick_y), rounder(max_tick_y-min_tick_y))
min_tick_y = round(float(min_tick_y), rounder(max_tick_y-min_tick_y))
#divisor_y = get_divisor(max(y), min(y))
#max_tick_y = round_up(max(y), divisor_y)
#min_tick_y = round_down(min(y), divisor_y)
_set_tick_labels_different(ax, max_tick_x, min_tick_x, max_tick_y, min_tick_y)
if groups is None:
ax.scatter(x, y, c='b', edgecolor='darkblue', zorder=2, s=100, alpha=0.7)
else:
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
for groupcount, group in enumerate(np.unique(groups)):
mask = groups == group
ax.scatter(x[mask], y[mask], label=group, color=colors[colorcount], marker=markers[markercount], s=100, alpha=0.7)
ax.legend(loc='lower right', fontsize=12)
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
ax.set_xlabel(xlabel, fontsize=16)
ax.set_ylabel('Value of '+label, fontsize=16)
#ax.set_xticklabels(rotation=45)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
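# Example usage (sketch) with synthetic placeholder data:
#
#     x = np.linspace(0, 10, 50)
#     y = 2 * x + np.random.normal(0, 1, 50)
#     plot_scatter(x, y, savepath='scatter.png', xlabel='feature value', label='target')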
def plot_keras_history(model_history, savepath, plot_type):
# Set image aspect ratio:
fig, ax = make_fig_ax()
keys = model_history.history.keys()
for k in keys:
if 'loss' not in k and 'val' not in k:
metric = k
accuracy = model_history.history[str(metric)]
loss = model_history.history['loss']
if plot_type == 'accuracy':
ax.plot(accuracy, label='training '+str(metric))
ax.set_ylabel(str(metric)+' (Accuracy)', fontsize=16)
try:
validation_accuracy = model_history.history['val_'+str(metric)]
ax.plot(validation_accuracy, label='validation '+str(metric))
except:
pass
if plot_type == 'loss':
ax.plot(loss, label='training loss')
ax.set_ylabel(str(metric)+' (Loss)', fontsize=16)
try:
validation_loss = model_history.history['val_loss']
ax.plot(validation_loss, label='validation loss')
except:
pass
ax.legend(loc='upper right', fontsize=12)
#_set_tick_labels_different(ax, max_tick_x, min_tick_x, max_tick_y, min_tick_y)
ax.set_xlabel('Epochs', fontsize=16)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_best_worst_split(y_true, best_run, worst_run, savepath,
title='Best Worst Overlay', label='target_value'):
"""
Method to create a parity plot (predicted vs. true values) of just the best scoring and worst scoring CV splits
Args:
y_true: (numpy array), array of true y data
best_run: (dict), the best scoring split_result from mastml_driver
worst_run: (dict), the worst scoring split_result from mastml_driver
savepath: (str), path to save plots to
title: (str), title of the best_worst_split plot
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
maxx = max(y_true) # TODO is round the right thing here?
minn = min(y_true)
maxx = round(float(maxx), rounder(maxx-minn))
minn = round(float(minn), rounder(maxx-minn))
ax.plot([minn, maxx], [minn, maxx], 'k--', lw=2, zorder=1)
# set tick labels
_set_tick_labels(ax, maxx, minn)
# do the actual plotting
ax.scatter(best_run['y_test_true'], best_run['y_test_pred'], c='red',
alpha=0.7, label='best test', edgecolor='darkred', zorder=2, s=100)
ax.scatter(worst_run['y_test_true'], worst_run['y_test_pred'], c='blue',
alpha=0.7, label='worst test', edgecolor='darkblue', zorder=3, s=60)
ax.legend(loc='lower right', fontsize=12)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
#font_dict = {'size' : 10, 'family' : 'sans-serif'}
# Duplicate the stats dicts with an additional label
best_stats = OrderedDict([('Best Run', None)])
best_stats.update(best_run['test_metrics'])
worst_stats = OrderedDict([('Worst Run', None)])
worst_stats.update(worst_run['test_metrics'])
plot_stats(fig, best_stats, x_align=x_align, y_align=0.90)
plot_stats(fig, worst_stats, x_align=x_align, y_align=0.60)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df_best = pd.DataFrame({'best run pred': best_run['y_test_pred'], 'best run true': best_run['y_test_true']})
df_worst = pd.DataFrame({'worst run pred': worst_run['y_test_pred'], 'worst run true': worst_run['y_test_true']})
df_best.to_csv(savepath + '_best.csv')
df_worst.to_csv(savepath + '_worst.csv')
@ipynb_maker
def plot_best_worst_per_point(y_true, y_pred_list, savepath, metrics_dict,
avg_stats, title='best worst per point', label='target_value'):
"""
Method to create a parity plot (predicted vs. true values) of the set of best and worst CV scores for each
individual data point.
Args:
y_true: (numpy array), array of true y data
y_pred_list: (list), list of numpy arrays containing predicted y data for each CV split
savepath: (str), path to save plots to
metrics_dict: (dict), dict of scikit-learn metric objects to calculate score of predicted vs. true values
avg_stats: (dict), dict of calculated average metrics over all CV splits
title: (str), title of the best_worst_per_point plot
label: (str), label used for axis labeling
Returns:
None
"""
worsts = []
bests = []
new_y_true = []
for yt, y_pred in zip(y_true, y_pred_list):
if len(y_pred) == 0 or np.any(np.isnan(y_pred)) or np.isnan(yt):
continue
worsts.append(max(y_pred, key=lambda yp: abs(yp-yt)))
bests.append( min(y_pred, key=lambda yp: abs(yp-yt)))
new_y_true.append(yt)
worst_stats = OrderedDict([('Worst combined:', None)])
best_stats = OrderedDict([('Best combined:', None)])
for name, (_, func) in metrics_dict.items():
worst_stats[name] = func(new_y_true, worsts)
best_stats[name] = func(new_y_true, bests)
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 15.5/24 #mmm yum
fig, ax = make_fig_ax(x_align=x_align)
# gather max and min
#all_vals = [val for val in worsts+bests if val is not None]
max1 = max(y_true)
min1 = min(y_true)
# draw dashed horizontal line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
# set tick labels
#maxx = max((max(bests), max(worsts), max(new_y_true)))
#minn = min((min(bests), min(worsts), min(new_y_true)))
#maxx, minn = recursive_max_and_min([bests, worsts, new_y_true])
maxx = round(float(max1), rounder(max1-min1))
minn = round(float(min1), rounder(max1-min1))
_set_tick_labels(ax, maxx, minn)
ax.scatter(new_y_true, bests, c='red', alpha=0.7, label='best test',
edgecolor='darkred', zorder=2, s=100)
ax.scatter(new_y_true, worsts, c='blue', alpha=0.7, label='worst test',
edgecolor='darkblue', zorder=3, s=60)
ax.legend(loc='lower right', fontsize=12)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.51, fontsize=10)
plot_stats(fig, worst_stats, x_align=x_align, y_align=0.73, fontsize=10)
plot_stats(fig, best_stats, x_align=x_align, y_align=0.95, fontsize=10)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y true': new_y_true,
'best per point': bests,
'worst per point': worsts})
df.to_csv(savepath + '.csv')
@ipynb_maker
def plot_predicted_vs_true_bars(y_true, y_pred_list, avg_stats,
savepath, title='best worst with bars', label='target_value', groups=None):
"""
Method to calculate parity plot (predicted vs. true) of average predictions, averaged over all CV splits, with error
bars on each point corresponding to the standard deviation of the predicted values over all CV splits.
Args:
y_true: (numpy array), array of true y data
y_pred_list: (list), list of numpy arrays containing predicted y data for each CV split
avg_stats: (dict), dict of calculated average metrics over all CV splits
savepath: (str), path to save plots to
title: (str), title of the best_worst_per_point plot
label: (str), label used for axis labeling
Returns:
None
"""
means = [nice_mean(y_pred) for y_pred in y_pred_list]
standard_error_means = [nice_std(y_pred)/np.sqrt(len(y_pred))
for y_pred in y_pred_list]
standard_errors = [nice_std(y_pred) for y_pred in y_pred_list]
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# gather max and min
max1 = max(np.nanmax(y_true), np.nanmax(means))
min1 = min(np.nanmin(y_true), np.nanmin(means))
# draw dashed diagonal parity line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
# set tick labels
#maxx, minn = recursive_max_and_min([means, y_true])
maxx = max(y_true)
minn = min(y_true)
maxx = round(float(maxx), rounder(maxx-minn))
minn = round(float(minn), rounder(maxx-minn))
#print(maxx, minn, rounder(maxx - minn))
_set_tick_labels(ax, maxx, minn)
if groups is None:
ax.errorbar(y_true, means, yerr=standard_errors, fmt='o', markerfacecolor='blue', markeredgecolor='black', markersize=10,
alpha=0.7, capsize=3)
else:
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
handles = dict()
unique_groups = np.unique(groups)
for groupcount, group in enumerate(unique_groups):
mask = groups == group
# logger.debug(' '*12 + f'{group} group_percent = {np.count_nonzero(mask) / len(groups)}')
handles[group] = ax.errorbar(y_true[mask], np.array(means)[mask], yerr=np.array(standard_errors)[mask],
marker=markers[markercount], markerfacecolor=colors[colorcount],
markeredgecolor=colors[colorcount], ecolor=colors[colorcount],
markersize=10, alpha=0.7, capsize=3, fmt='o')
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
ax.legend(handles.values(), handles.keys(), loc='lower right', fontsize=10)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y true': y_true,
'average predicted values': means,
'error bar values': standard_errors})
df.to_csv(savepath + '.csv')
@ipynb_maker
def plot_metric_vs_group(metric, groups, stats, avg_stats, savepath):
"""
Method to plot the value of a particular calculated metric (e.g. RMSE, R^2, etc) for each data group
Args:
metric: (str), name of a calculation metric
groups: (numpy array), array of group names
stats: (dict), dict of training or testing statistics for a particular run
avg_stats: (dict), dict of calculated average metrics over all CV splits
savepath: (str), path to save plots to
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# do the actual plotting
ax.scatter(groups, stats, c='blue', alpha=0.7, edgecolor='darkblue', zorder=2, s=100)
# set axis labels
ax.set_xlabel('Group', fontsize=16)
ax.set_ylabel(metric, fontsize=16)
ax.set_xticklabels(labels=groups, fontsize=14)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
# Save data stats to csv
savepath_parse = savepath.split(str(metric)+'_vs_group.png')[0]
pd.DataFrame(groups, stats).to_csv(os.path.join(savepath_parse, str(metric)+'_vs_group.csv'))
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_metric_vs_group_size(metric, groups, stats, avg_stats, savepath):
"""
Method to plot the value of a particular calculated metric (e.g. RMSE, R^2, etc) as a function of the size of each group.
Args:
metric: (str), name of a calculation metric
groups: (numpy array), array of group names
stats: (dict), dict of training or testing statistics for a particular run
avg_stats: (dict), dict of calculated average metrics over all CV splits
savepath: (str), path to save plots to
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# Get unique groups from full group array
unique_groups = np.unique(groups)
# Get the size of each group
group_lengths = list()
for group in unique_groups:
group_lengths.append(len(np.concatenate(np.where(groups==group)).tolist()))
# do the actual plotting
ax.scatter(group_lengths, stats, c='blue', alpha=0.7, edgecolor='darkblue', zorder=2, s=100)
# set axis labels
ax.set_xlabel('Group size', fontsize=16)
ax.set_ylabel(metric, fontsize=16)
#ax.set_xticklabels(labels=group_lengths, fontsize=14)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
# Save data stats to csv
savepath_parse = savepath.split(str(metric)+'_vs_group_size.png')[0]
pd.DataFrame(group_lengths, stats).to_csv(os.path.join(savepath_parse, str(metric)+'_vs_group_size.csv'))
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
# Credit to: http://contrib.scikit-learn.org/forest-confidence-interval/_modules/forestci/forestci.html
def calc_inbag_modified(n_samples, forest, is_ensemble):
"""
Derive samples used to create trees in scikit-learn RandomForest objects.
Recovers the samples in each tree from the random state of that tree using
:func:`forest._generate_sample_indices`.
Parameters
----------
n_samples : int
The number of samples used to fit the scikit-learn RandomForest object.
forest : RandomForest
Regressor or Classifier object that is already fit by scikit-learn.
Returns
-------
Array that records how many times a data point was placed in a tree.
Columns are individual trees and rows are samples; each entry records how many
times that sample was drawn when building that tree.
"""
if not forest.bootstrap:
e_s = "Cannot calculate the inbag from a forest that has "
e_s = " bootstrap=False"
raise ValueError(e_s)
n_trees = forest.n_estimators
inbag = np.zeros((n_samples, n_trees))
sample_idx = []
n_samples_bootstrap = _get_n_samples_bootstrap(
n_samples, n_samples
)
for t_idx in range(n_trees):
if not is_ensemble:
sample_idx.append(
_generate_sample_indices(forest.estimators_[t_idx].random_state,
n_samples, n_samples_bootstrap))
inbag[:, t_idx] = np.bincount(sample_idx[-1], minlength=n_samples)
else:
sample_idx = forest.bootstrapped_idxs[t_idx]
inbag[:, t_idx] = np.bincount(sample_idx, minlength=n_samples)
return inbag
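# Example usage (sketch): works on any fitted scikit-learn forest trained with bootstrap=True;
# pass is_ensemble=True only for the custom EnsembleRegressor wrapper. Data names are placeholders.
#
#     from sklearn.ensemble import RandomForestRegressor
#     rf = RandomForestRegressor(n_estimators=100, bootstrap=True).fit(X_train, y_train)
#     inbag = calc_inbag_modified(X_train.shape[0], rf, is_ensemble=False)
#     # inbag[i, t] counts how many times sample i was drawn when building tree t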
# Credit to: http://contrib.scikit-learn.org/forest-confidence-interval/_modules/forestci/forestci.html
def random_forest_error_modified(forest, is_ensemble, X_train, X_test, basic_IJ=False,inbag=None,
calibrate=True, memory_constrained=False,
memory_limit=None):
"""
Calculate error bars from scikit-learn RandomForest estimators.
RandomForest is a regressor or classifier object
this variance can be used to plot error bars for RandomForest objects
Parameters
----------
forest : RandomForest
Regressor or Classifier object.
X_train : ndarray
An array with shape (n_train_sample, n_features). The design matrix for
training data.
X_test : ndarray
An array with shape (n_test_sample, n_features). The design matrix
for testing data
basic_IJ : boolean, optional
Return the value of basic infinitesimal jackknife or Monte Carlo
corrected infinitesimal jackknife.
inbag : ndarray, optional
The inbag matrix that fit the data. If set to `None` (default) it
will be inferred from the forest. However, this only works for trees
for which bootstrapping was set to `True`. That is, if sampling was
done with replacement. Otherwise, users need to provide their own
inbag matrix.
calibrate: boolean, optional
Whether to apply calibration to mitigate Monte Carlo noise.
Some variance estimates may be negative due to Monte Carlo effects if
the number of trees in the forest is too small. Calibration corrects for this
Monte Carlo noise. Default: True
memory_constrained: boolean, optional
Whether or not there is a restriction on memory. If False, it is
assumed that a ndarry of shape (n_train_sample,n_test_sample) fits
in main memory. Setting to True can actually provide a speed up if
memory_limit is tuned to the optimal range.
memory_limit: int, optional.
An upper bound for how much memory the itermediate matrices will take
up in Megabytes. This must be provided if memory_constrained=True.
Returns
-------
An array with the unbiased sampling variance (V_IJ_unbiased)
for a RandomForest object.
See Also
----------
:func:`calc_inbag`
Notes
-----
The calculation of error is based on the infinitesimal jackknife variance,
as described in [Wager2014]_ and is a Python implementation of the R code
provided at: https://github.com/swager/randomForestCI
.. [Wager2014] S. Wager, T. Hastie, B. Efron. "Confidence Intervals for
Random Forests: The Jackknife and the Infinitesimal Jackknife", Journal
of Machine Learning Research vol. 15, pp. 1625-1651, 2014.
"""
if inbag is None:
inbag = calc_inbag_modified(X_train.shape[0], forest, is_ensemble)
if not is_ensemble:
pred = np.array([tree.predict(X_test) for tree in forest]).T
else:
pred = np.array([tree.predict(X_test) for tree in forest.model]).T
pred = pred[0]
pred_mean = np.mean(pred, 0)
pred_centered = pred - pred_mean
n_trees = forest.n_estimators
V_IJ = fci._core_computation(X_train, X_test, inbag, pred_centered, n_trees,
memory_constrained, memory_limit)
V_IJ_unbiased = fci._bias_correction(V_IJ, inbag, pred_centered, n_trees)
# Correct for cases where resampling is done without replacement:
if np.max(inbag) == 1:
variance_inflation = 1 / (1 - np.mean(inbag)) ** 2
V_IJ_unbiased *= variance_inflation
if basic_IJ:
return V_IJ
if not calibrate:
return V_IJ_unbiased
if V_IJ_unbiased.shape[0] <= 20:
print("No calibration with n_samples <= 20")
return V_IJ_unbiased
if calibrate:
calibration_ratio = 2
n_sample = np.ceil(n_trees / calibration_ratio)
new_forest = copy.deepcopy(forest)
if not is_ensemble:
new_forest.estimators_ =\
np.random.permutation(new_forest.estimators_)[:int(n_sample)]
else:
new_forest.model =\
np.random.permutation(new_forest.model)[:int(n_sample)]
new_forest.n_estimators = int(n_sample)
results_ss = random_forest_error_modified(new_forest, is_ensemble, X_train, X_test,
calibrate=False,
memory_constrained=memory_constrained,
memory_limit=memory_limit)
# Use this second set of variance estimates
# to estimate scale of Monte Carlo noise
sigma2_ss = np.mean((results_ss - V_IJ_unbiased)**2)
delta = n_sample / n_trees
sigma2 = (delta**2 + (1 - delta)**2) / (2 * (1 - delta)**2) * sigma2_ss
# Use Monte Carlo noise scale estimate for empirical Bayes calibration
V_IJ_calibrated = fci.calibration.calibrateEB(V_IJ_unbiased, sigma2)
return V_IJ_calibrated
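# Example usage (sketch): returns the (optionally calibrated) infinitesimal-jackknife variance
# for each test point; taking the square root gives a one-sigma error bar. 'rf', 'X_train' and
# 'X_test' are placeholders for a fitted forest and its design matrices.
#
#     variances = random_forest_error_modified(rf, is_ensemble=False,
#                                               X_train=X_train, X_test=X_test)
#     stdevs = np.sqrt(variances)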
def prediction_intervals(model, X, rf_error_method, rf_error_percentile, Xtrain, Xtest):
"""
Method to calculate prediction intervals when using Random Forest and Gaussian Process regression models.
Prediction intervals for random forest adapted from https://blog.datadive.net/prediction-intervals-for-random-forests/
Args:
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
rf_error_method: (str), type of error bar to formulate (e.g. "stdev" is standard deviation of predicted errors,
"confint" is error bar as confidence interval, "jackknife_*" variants use the infinitesimal jackknife)
rf_error_percentile: (int), percentile for which to form error bars when rf_error_method is "confint"
Xtrain: (numpy array), array of training X features (needed by the jackknife error methods)
Xtest: (numpy array), array of test X features (needed by the jackknife error methods)
Returns:
err_up: (list), list of upper bounds of error bars for each data point
err_down: (list), list of lower bounds of error bars for each data point
"""
err_down = list()
err_up = list()
nan_indices = list()
indices_TF = list()
X_aslist = X.values.tolist()
if model.__class__.__name__ in ['RandomForestRegressor', 'GradientBoostingRegressor', 'ExtraTreesRegressor', 'EnsembleRegressor']:
if rf_error_method == 'jackknife_calibrated':
if 'EnsembleRegressor' in model.__class__.__name__:
rf_variances = random_forest_error_modified(model, True, X_train=Xtrain, X_test=Xtest, basic_IJ=False, calibrate=True)
else:
rf_variances = random_forest_error_modified(model, False, X_train=Xtrain, X_test=Xtest, basic_IJ=False, calibrate=True)
rf_stdevs = np.sqrt(rf_variances)
nan_indices = np.where(np.isnan(rf_stdevs))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(rf_stdevs)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
rf_stdevs = rf_stdevs[~np.isnan(rf_stdevs)]
err_up = err_down = rf_stdevs
elif rf_error_method == 'jackknife_uncalibrated':
if 'EnsembleRegressor' in model.__class__.__name__:
rf_variances = random_forest_error_modified(model, True, X_train=Xtrain, X_test=Xtest, basic_IJ=False, calibrate=False)
else:
rf_variances = random_forest_error_modified(model, False, X_train=Xtrain, X_test=Xtest, basic_IJ=False, calibrate=False)
rf_stdevs = np.sqrt(rf_variances)
nan_indices = np.where(np.isnan(rf_stdevs))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(rf_stdevs)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
rf_stdevs = rf_stdevs[~np.isnan(rf_stdevs)]
err_up = err_down = rf_stdevs
elif rf_error_method == 'jackknife_basic':
if 'EnsembleRegressor' in model.__class__.__name__:
rf_variances = random_forest_error_modified(model, True, X_train=Xtrain, X_test=Xtest, basic_IJ=True, calibrate=False)
else:
rf_variances = random_forest_error_modified(model, False, X_train=Xtrain, X_test=Xtest, basic_IJ=True, calibrate=False)
rf_stdevs = np.sqrt(rf_variances)
nan_indices = np.where(np.isnan(rf_stdevs))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(rf_stdevs)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
rf_stdevs = rf_stdevs[~np.isnan(rf_stdevs)]
err_up = err_down = rf_stdevs
else:
for x in range(len(X_aslist)):
preds = list()
if model.__class__.__name__ == 'RandomForestRegressor':
for pred in model.estimators_:
preds.append(pred.predict(np.array(X_aslist[x]).reshape(1,-1))[0])
elif model.__class__.__name__ == 'GradientBoostingRegressor':
for pred in model.estimators_.tolist():
preds.append(pred[0].predict(np.array(X_aslist[x]).reshape(1,-1))[0])
elif model.__class__.__name__ == 'EnsembleRegressor':
for pred in model.model:
preds.append(pred.predict(np.array(X_aslist[x]).reshape(1,-1))[0])
if rf_error_method == 'confint':
#e_down = np.percentile(preds, (100 - int(rf_error_percentile)) / 2.)
#e_up = np.percentile(preds, 100 - (100 - int(rf_error_percentile)) / 2.)
e_down = np.percentile(preds, float(rf_error_percentile))
e_up = np.percentile(preds, float(rf_error_percentile))
elif rf_error_method == 'stdev':
e_down = np.std(preds)
e_up = np.std(preds)
elif rf_error_method == 'False' or rf_error_method is False:
# basically default to stdev
e_down = np.std(preds)
e_up = np.std(preds)
else:
raise ValueError('rf_error_method must be one of ["stdev", "confint", "jackknife_basic", "jackknife_calibrated", "jackknife_uncalibrated"]')
#if e_up == 0.0:
# e_up = 10 ** 10
#if e_down == 0.0:
# e_down = 10 ** 10
err_down.append(e_down)
err_up.append(e_up)
nan_indices = np.where(np.isnan(err_up))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(err_up)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
if model.__class__.__name__=='GaussianProcessRegressor':
preds = model.predict(X, return_std=True)[1] # Get the stdev model error from the predictions of GPR
err_up = preds
err_down = preds
nan_indices = np.where(np.isnan(err_up))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(err_up)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
return err_down, err_up, nan_indices, np.array(indices_TF)
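# Example usage (sketch): X, Xtrain and Xtest are pandas DataFrames of features (the 'stdev'
# and 'confint' branches call X.values.tolist()); indices_TF flags the points whose error
# estimate was not NaN. The names below are placeholders.
#
#     err_down, err_up, nan_indices, indices_TF = prediction_intervals(
#         rf, X=X_test_df, rf_error_method='stdev', rf_error_percentile=68,
#         Xtrain=X_train_df, Xtest=X_test_df)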
@ipynb_maker
def plot_normalized_error(y_true, y_pred, savepath, model, rf_error_method, rf_error_percentile, X=None, Xtrain=None,
Xtest=None):
"""
Method to plot the normalized residual errors of a model prediction
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted normalized error plot
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
rf_error_method: (str), type of error bar to compute (see prediction_intervals)
rf_error_percentile: (int), percentile used when rf_error_method is "confint"
Xtrain: (numpy array), array of training X features (needed by the jackknife error methods)
Xtest: (numpy array), array of test X features (needed by the jackknife error methods)
Returns:
None
"""
path = os.path.dirname(savepath)
# Here: if model is random forest or Gaussian process, get real error bars. Else, just residuals
model_name = model.__class__.__name__
# TODO: also add support for Gradient Boosted Regressor
models_with_error_predictions = ['RandomForestRegressor', 'GaussianProcessRegressor', 'GradientBoostingRegressor', 'EnsembleRegressor']
has_model_errors = False
y_pred_ = y_pred
y_true_ = y_true
if model_name in models_with_error_predictions:
has_model_errors = True
err_down, err_up, nan_indices, indices_TF = prediction_intervals(model, X, rf_error_method=rf_error_method,
rf_error_percentile=rf_error_percentile, Xtrain=Xtrain, Xtest=Xtest)
# Correct for nan indices being present
if has_model_errors:
y_pred_ = y_pred_[indices_TF]
y_true_ = y_true_[indices_TF]
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
mu = 0
sigma = 1
residuals = (y_true_ - y_pred_)
normalized_residuals = (y_true_-y_pred_)/np.std(y_true_-y_pred_)
density_residuals = gaussian_kde(normalized_residuals)
x = np.linspace(mu - 5 * sigma, mu + 5 * sigma, y_true_.shape[0])
ax.plot(x, norm.pdf(x, mu, sigma), linewidth=4, color='blue', label="Analytical Gaussian")
ax.plot(x, density_residuals(x), linewidth=4, color='green', label="Model Residuals")
maxx = 5
minn = -5
if has_model_errors:
err_avg = [(abs(e1)+abs(e2))/2 for e1, e2 in zip(err_up, err_down)]
err_avg = np.asarray(err_avg)
err_avg[err_avg==0.0] = 0.0001
err_avg = err_avg.tolist()
model_errors = (y_true_-y_pred_)/err_avg
density_errors = gaussian_kde(model_errors)
maxy = max(max(density_residuals(x)), max(norm.pdf(x, mu, sigma)), max(density_errors(x)))
miny = min(min(density_residuals(x)), min(norm.pdf(x, mu, sigma)), min(density_errors(x)))
ax.plot(x, density_errors(x), linewidth=4, color='purple', label="Model Errors")
# Save data to csv file
data_dict = {"Y True": y_true_, "Y Pred": y_pred_, "Plotted x values": x, "error_bars_up": err_up,
"error_bars_down": err_down, "error_avg": err_avg,
"analytical gaussian (plotted y blue values)": norm.pdf(x, mu, sigma),
"model residuals": residuals,
"model normalized residuals (plotted y green values)": density_residuals(x),
"model errors (plotted y purple values)": density_errors(x)}
pd.DataFrame(data_dict).to_csv(savepath.split('.png')[0]+'.csv')
else:
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "x values": x, "analytical gaussian": norm.pdf(x, mu, sigma),
"model residuals": density_residuals(x)}
pd.DataFrame(data_dict).to_csv(savepath.split('.png')[0]+'.csv')
maxy = max(max(density_residuals(x)), max(norm.pdf(x, mu, sigma)))
miny = min(min(density_residuals(x)), min(norm.pdf(x, mu, sigma)))
ax.legend(loc=0, fontsize=12, frameon=False)
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Probability density", fontsize=18)
_set_tick_labels_different(ax, maxx, minn, maxy, miny)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_cumulative_normalized_error(y_true, y_pred, savepath, model, rf_error_method, rf_error_percentile, X=None,
Xtrain=None, Xtest=None):
"""
Method to plot the cumulative normalized residual errors of a model prediction
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted cumulative normalized error plot
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
rf_error_method: (str), type of error bar to compute (see prediction_intervals)
rf_error_percentile: (int), percentile used when rf_error_method is "confint"
Xtrain: (numpy array), array of training X features (needed by the jackknife error methods)
Xtest: (numpy array), array of test X features (needed by the jackknife error methods)
Returns:
None
"""
# Here: if model is random forest or Gaussian process, get real error bars. Else, just residuals
model_name = model.__class__.__name__
models_with_error_predictions = ['RandomForestRegressor', 'GaussianProcessRegressor', 'GradientBoostingRegressor', 'EnsembleRegressor']
has_model_errors = False
y_pred_ = y_pred
y_true_ = y_true
if model_name in models_with_error_predictions:
has_model_errors = True
err_down, err_up, nan_indices, indices_TF = prediction_intervals(model, X, rf_error_method=rf_error_method,
rf_error_percentile=rf_error_percentile, Xtrain=Xtrain, Xtest=Xtest)
# Need to remove NaN's before plotting. These will be present when doing validation runs. Note NaN's only show up in y_pred_
# Correct for nan indices being present
if has_model_errors:
y_pred_ = y_pred_[indices_TF]
y_true_ = y_true_[indices_TF]
y_true_ = y_true_[~np.isnan(y_pred_)]
y_pred_ = y_pred_[~np.isnan(y_pred_)]
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
analytic_gau = np.random.normal(0, 1, 10000)
analytic_gau = abs(analytic_gau)
n_analytic = np.arange(1, len(analytic_gau) + 1) / float(len(analytic_gau))
X_analytic = np.sort(analytic_gau)
residuals = y_true_-y_pred_
normalized_residuals = abs((y_true_-y_pred_)/np.std(y_true_-y_pred_))
n_residuals = np.arange(1, len(normalized_residuals) + 1) / float(len(normalized_residuals))
X_residuals = np.sort(normalized_residuals) #r"$\mathrm{Predicted \/ Value}, \mathit{eV}$"
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Fraction", fontsize=18)
ax.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
ax.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
ax.set_xlim([0, 5])
if has_model_errors:
err_avg = [(abs(e1)+abs(e2))/2 for e1, e2 in zip(err_up, err_down)]
err_avg = np.asarray(err_avg)
err_avg[err_avg==0.0] = 0.0001
err_avg = err_avg.tolist()
model_errors = abs((y_true_-y_pred_)/err_avg)
        n_errors = np.arange(1, len(model_errors) + 1) / float(len(model_errors))
X_errors = np.sort(model_errors)
ax.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "Analytical Gaussian values": analytic_gau, "Analytical Gaussian (sorted, blue data)": X_analytic,
"model residuals": residuals,
"Model normalized residuals": normalized_residuals, "Model Residuals (sorted, green data)": X_residuals,
"error_bars_up": err_up, "error_bars_down": err_down,
"Model error values (r value: (ytrue-ypred)/(model error avg))": model_errors,
"Model errors (sorted, purple values)": X_errors}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame.from_dict(data_dict, orient='index')
df = df.transpose()
df.to_csv(savepath.split('.png')[0]+'.csv', index=False)
else:
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "x analytical": X_analytic, "analytical gaussian": n_analytic, "x residuals": X_residuals,
"model residuals": n_residuals}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame.from_dict(data_dict, orient='index')
df = df.transpose()
df.to_csv(savepath.split('.png')[0]+'.csv', index=False)
ax.legend(loc=0, fontsize=14, frameon=False)
xlabels = np.linspace(2, 3, 3)
ylabels = np.linspace(0.9, 1, 2)
axin = zoomed_inset_axes(ax, 2.5, loc=7)
axin.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
axin.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
if has_model_errors:
axin.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
axin.set_xticklabels(xlabels, fontsize=8, rotation=90)
axin.set_yticklabels(ylabels, fontsize=8)
axin.set_xlim([2, 3])
axin.set_ylim([0.9, 1])
maxx = 5
minn = 0
maxy = 1.1
miny = 0
_set_tick_labels_different(ax, maxx, minn, maxy, miny)
mark_inset(ax, axin, loc1=1, loc2=2)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
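# Illustrative usage sketch (an addition, not part of the original module): shows
# one way to drive plot_cumulative_normalized_error for a fitted ensemble model.
# The rf_error_method / rf_error_percentile values are kept as parameters here
# because their concrete settings come from the surrounding workflow, which is
# not shown in this excerpt.
def _example_cumulative_error_plot(model, X_train, X_test, y_test, savepath,
                                   rf_error_method, rf_error_percentile):
    # Predict on the held-out data and hand everything to the plotting routine above.
    y_pred = model.predict(X_test)
    plot_cumulative_normalized_error(y_true=y_test, y_pred=y_pred, savepath=savepath,
                                     model=model, rf_error_method=rf_error_method,
                                     rf_error_percentile=rf_error_percentile,
                                     X=X_test, Xtrain=X_train, Xtest=X_test)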
@ipynb_maker
def plot_average_cumulative_normalized_error(y_true, y_pred, savepath, has_model_errors, err_avg=None):
"""
Method to plot the cumulative normalized residual errors of a model prediction
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted cumulative normalized error plot
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
avg_stats: (dict), dict of calculated average metrics over all CV splits
Returns:
None
"""
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
analytic_gau = np.random.normal(0, 1, 10000)
analytic_gau = abs(analytic_gau)
    n_analytic = np.arange(1, len(analytic_gau) + 1) / float(len(analytic_gau))
X_analytic = np.sort(analytic_gau)
residuals = y_true-y_pred
residuals = residuals[~np.isnan(residuals)]
    normalized_residuals = abs(residuals/np.std(residuals))
    n_residuals = np.arange(1, len(normalized_residuals) + 1) / float(len(normalized_residuals))
    X_residuals = np.sort(normalized_residuals)
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Fraction", fontsize=18)
ax.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
ax.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
ax.set_xlim([0, 5])
if has_model_errors:
err_avg = np.asarray(err_avg)
err_avg[err_avg==0.0] = 0.0001
err_avg = err_avg.tolist()
model_errors = abs((y_true-y_pred)/err_avg)
model_errors = model_errors[~np.isnan(model_errors)]
        n_errors = np.arange(1, len(model_errors) + 1) / float(len(model_errors))
X_errors = np.sort(model_errors)
ax.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "Analytical Gaussian values": analytic_gau,
"Analytical Gaussian (sorted, blue data)": X_analytic,
"model residuals": residuals,
"Model normalized residuals": normalized_residuals, "Model Residuals (sorted, green data)": X_residuals,
"Model error values (r value: (ytrue-ypred)/(model error avg))": model_errors,
"Model errors (sorted, purple values)": X_errors}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame.from_dict(data_dict, orient='index')
df = df.transpose()
df.to_csv(savepath.split('.png')[0]+'.csv', index=False)
else:
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "x analytical": X_analytic, "analytical gaussian": n_analytic,
"x residuals": X_residuals, "model residuals": n_residuals}
# Save this way to avoid issue with different array sizes in data_dict
df = | pd.DataFrame.from_dict(data_dict, orient='index') | pandas.DataFrame.from_dict |
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error as mae  # note: despite the alias, this is mean *squared* error, not mean absolute error
from sklearn.model_selection import RepeatedKFold
import numpy as np
data = | pd.read_csv('C:\\Users\\<NAME>\\Documents\\Research Projects\\Forecast of Rainfall Quantity and its variation using Envrionmental Features\\Data\\Normalized & Combined Data\\All Districts.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
from datetime import datetime
###############
# SELECT DATA #
###############
print("Selecting attributes...")
# GIT_COMMITS
gitCommits = pd.read_csv("../../data/raw/GIT_COMMITS.csv")
attributes = ['projectID', 'commitHash', 'author', 'committer', 'committerDate']
gitCommits = gitCommits[attributes]
gitCommits.to_csv('../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv', header=True)
# GIT_COMMITS_CHANGES
gitCommitsChanges = pd.read_csv("../../data/raw/GIT_COMMITS_CHANGES.csv")
attributes = ['projectID', 'commitHash', 'changeType', 'linesAdded', 'linesRemoved']
gitCommitsChanges = gitCommitsChanges[attributes]
gitCommitsChanges.to_csv('../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv', header=True)
# JIRA_ISSUES
jiraIssues = pd.read_csv("../../data/raw/JIRA_ISSUES.csv")
attributes = ['projectID', 'key', 'creationDate', 'resolutionDate', 'type', 'priority', 'assignee', 'reporter']
jiraIssues = jiraIssues[attributes]
jiraIssues.to_csv('../../data/interim/DataPreparation/SelectData/JIRA_ISSUES_select.csv', header=True)
# REFACTORING_MINER
refactoringMiner = pd.read_csv("../../data/raw/REFACTORING_MINER.csv")
attributes = ['projectID', 'commitHash', 'refactoringType']
refactoringMiner = refactoringMiner[attributes]
refactoringMiner.to_csv('../../data/interim/DataPreparation/SelectData/REFACTORING_MINER_select.csv', header=True)
# SONAR_ISSUES
sonarIssues = pd.read_csv("../../data/raw/SONAR_ISSUES.csv")
attributes = ['projectID', 'creationDate', 'closeDate', 'creationCommitHash', 'closeCommitHash', 'type', 'severity',
'debt', 'author']
sonarIssues = sonarIssues[attributes]
sonarIssues.to_csv('../../data/interim/DataPreparation/SelectData/SONAR_ISSUES_select.csv', header=True)
# SONAR_MEASURES
sonarMeasures = pd.read_csv("../../data/raw/SONAR_MEASURES.csv")
attributes = ['commitHash', 'projectID', 'functions', 'commentLinesDensity', 'complexity', 'functionComplexity', 'duplicatedLinesDensity',
'violations', 'blockerViolations', 'criticalViolations', 'infoViolations', 'majorViolations', 'minorViolations', 'codeSmells',
'bugs', 'vulnerabilities', 'cognitiveComplexity', 'ncloc', 'sqaleIndex', 'sqaleDebtRatio', 'reliabilityRemediationEffort', 'securityRemediationEffort']
sonarMeasures = sonarMeasures[attributes]
sonarMeasures.to_csv('../../data/interim/DataPreparation/SelectData/SONAR_MEASURES_select.csv', header=True)
# SZZ_FAULT_INDUCING_COMMITS
szzFaultInducingCommits = pd.read_csv("../../data/raw/SZZ_FAULT_INDUCING_COMMITS.csv")
attributes = ['projectID', 'faultFixingCommitHash', 'faultInducingCommitHash', 'key']
szzFaultInducingCommits = szzFaultInducingCommits[attributes]
szzFaultInducingCommits.to_csv('../../data/interim/DataPreparation/SelectData/SZZ_FAULT_INDUCING_COMMITS_select.csv', header=True)
print("Attributes selected.")
##############
# CLEAN DATA #
##############
print("Cleaning data...")
def intersection(l1, l2):
    """Return the elements of l1 that also appear in l2, preserving the order of l1."""
    temp = set(l2)
    l3 = [value for value in l1 if value in temp]
    return l3
def difference(li1, li2):
    """Return the symmetric difference of li1 and li2 as a list (order not guaranteed)."""
    return (list(list(set(li1)-set(li2)) + list(set(li2)-set(li1))))
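# Quick illustrative sanity check of the two helpers above (added here for
# clarity; not part of the original script):
assert intersection([1, 2, 3], [2, 3, 4]) == [2, 3]
assert sorted(difference([1, 2, 3], [2, 3, 4])) == [1, 4]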
# GIT_COMMITS
gitCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv")
authorNan = list(np.where(gitCommits.author.isna()))[0]
committerNan = list(np.where(gitCommits.committer.isna()))[0]
inters = intersection(authorNan, committerNan)
gitCommits = gitCommits.drop(inters)
gitCommits.to_csv('../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv', header=True)
# GIT_COMMITS_CHANGES
gitCommitsChanges = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv").iloc[:,1:]
gitCommitsChanges.to_csv('../../data/interim/DataPreparation/CleanData/GIT_COMMITS_CHANGES_clean.csv', header=True)
# JIRA_ISSUES
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/SelectData/JIRA_ISSUES_select.csv").iloc[:,1:]
resolutionDate_nan = list(np.where(jiraIssues.resolutionDate.isna()))[0]
jiraIssues_notresolved = jiraIssues.iloc[resolutionDate_nan,:]
gitCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv").iloc[:,[1,-1]]
lastTimestamp = gitCommits.groupby(['projectID']).max()
jiraIssues_notresolved = pd.merge(jiraIssues_notresolved, lastTimestamp, how='left', on='projectID')
jiraIssues_notresolved = jiraIssues_notresolved.iloc[:,[0,1,2,4,5,6,7,8]].rename(columns={'committerDate': 'resolutionDate'})
jiraIssues_resolved = jiraIssues.drop(resolutionDate_nan)
jiraIssues = pd.concat([jiraIssues_resolved, jiraIssues_notresolved], sort=False).sort_index().reset_index().iloc[:,1:]
priority_nan = list(np.where(jiraIssues.priority.isna()))[0]
jiraIssues = jiraIssues.drop(priority_nan)
assignee_nan = list(np.where(jiraIssues.assignee.isna()))[0]
jiraIssues.assignee = jiraIssues.assignee.fillna('not-assigned')
jiraIssues.to_csv('../../data/interim/DataPreparation/CleanData/JIRA_ISSUES_clean.csv', header=True)
# REFACTORING_MINER
refactoringMiner = pd.read_csv("../../data/interim/DataPreparation/SelectData/REFACTORING_MINER_select.csv")
commitHashNan = list(np.where(refactoringMiner.commitHash.isna()))[0]
refactoringTypeNan = list(np.where(refactoringMiner.refactoringType.isna()))[0]
inters = intersection(commitHashNan, refactoringTypeNan)
refactoringMiner = refactoringMiner.drop(inters)
refactoringMiner.to_csv('../../data/interim/DataPreparation/CleanData/REFACTORING_MINER_clean.csv', header=True)
# SONAR_ISSUES
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/SelectData/SONAR_ISSUES_select.csv").iloc[:,1:]
closeDateNan = list(np.where(sonarIssues.closeDate.isna()))[0]
closeCommitHashNan = list(np.where(sonarIssues.closeCommitHash.isna()))[0]
debtNan = list(np.where(sonarIssues.debt.isna()))[0]
authorNan = list(np.where(sonarIssues.author.isna()))[0]
inter = intersection(closeDateNan, closeCommitHashNan)
diff = difference(closeCommitHashNan, closeDateNan)
debtNan = list(np.where(sonarIssues.debt.isna())[0])
sonarIssues = sonarIssues.drop(debtNan).reset_index()
sonarIssues = sonarIssues.fillna({'closeCommitHash': 'not-resolved'})
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv").iloc[:,1:]
lastTimestamp = gitCommits.loc[:,['projectID', 'committerDate']].groupby(['projectID']).max()
closeDateNan = list(np.where(sonarIssues.closeDate.isna()))[0]
sonarIssues_notresolved = sonarIssues.iloc[closeDateNan,:]
sonarIssues_notresolved = pd.merge(sonarIssues_notresolved, lastTimestamp, how='left', on='projectID')
sonarIssues_notresolved = sonarIssues_notresolved.loc[:,['projectID', 'creationDate', 'creationCommitHash', 'closeCommitHash', 'type', 'severity', 'debt', 'author', 'committerDate']].rename(columns={'committerDate': 'closeDate'})
sonarIssues_resolved = sonarIssues.drop(closeDateNan)
sonarIssues = | pd.concat([sonarIssues_resolved, sonarIssues_notresolved], sort=False) | pandas.concat |
# --- imports from python standard library -------------------------------------
from abc import ABC, abstractmethod
from typing import Any, Generator, List
# --- external imports ---------------------------------------------------------
import pandas as pd
# --- imports from own packages and modules -------------------------------------
# ------------------------------------------------------------------------------
REQUIRED_FIELDS = [
"title", "year"
]
class MetaDataLibraryBase(ABC):
@abstractmethod
def _get_data(self, movie_id: Any, field: str) -> Any:
pass
@abstractmethod
def _set_data(self, movie_id: Any, field: str, value: Any) -> None:
pass
@abstractmethod
def _append_data(self, movie_id: Any, field: str, value: Any) -> None:
pass
@abstractmethod
def add_movie(self, movie_id: Any, title: str, year: int, **fields) -> None:
pass
@abstractmethod
def delete_movie(self, movie_id: Any) -> None:
pass
@abstractmethod
def iter_ids(self) -> Generator[Any, None, None]:
pass
@abstractmethod
def nmovies(self) -> int:
pass
@property
def fields(self) -> List[str]:
return REQUIRED_FIELDS.copy()
def get_data(self, movie_id: Any, field: str) -> Any:
if field not in self.fields:
raise ValueError(f"{self} has no field {field}")
return self._get_data(movie_id, field)
def set_data(self, movie_id: Any, field: str, value: Any) -> None:
if field not in self.fields:
raise ValueError(f"{self} has no field {field}")
return self._set_data(movie_id, field, value)
def append_data(self, movie_id: Any, field: str, value: Any) -> None:
if field not in self.fields:
raise ValueError(f"{self} has no field {field}")
return self._append_data(movie_id, field, value)
def title(self, movie_id: Any) -> str:
return self._get_data(movie_id, "title")
def year(self, movie_id: Any) -> int:
return self._get_data(movie_id, "year")
def to_pandas(self):
data = {
field: [self._get_data(mid, field) for mid in self.iter_ids()]
for field in self.fields
}
        index = pd.Index(list(self.iter_ids()), name='movie_id')
return | pd.DataFrame(data=data, index=index) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcasted to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
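# Hedged pandas-only sketch of the `min_count` semantics exercised above
# (illustrative `_example_` helper, not collected by pytest): with a single
# non-NA value, sum() succeeds for min_count=1 but yields NaN once min_count
# exceeds the number of valid entries.
def _example_sum_min_count():
    s = pd.Series([1.0, np.nan])
    assert s.sum(min_count=1) == 1.0
    assert np.isnan(s.sum(min_count=2))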
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's string hash is salted per process, which can make
    # enc_with_name_arr and enc_arr come out identical, and there is no clean
    # way to force a stable string hash. An integer name is used instead so
    # hash() returns a constant value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
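# Hedged aside on the integer name above (illustrative `_example_` helper,
# not collected by pytest): CPython hashes small integers to themselves, so
# hash(1) is stable across runs, unlike the per-process salted string hash.
def _example_stable_int_hash():
    assert hash(1) == 1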
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
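# Hedged NumPy-only sketch of the `right` flag checked above (illustrative
# `_example_` helper, not collected by pytest): with right=False a value equal
# to a bin edge falls into the upper bucket, with right=True it stays below.
def _example_np_digitize_right():
    data = np.array([1, 5, 10])
    bins = np.array([5])
    assert np.digitize(data, bins, right=False).tolist() == [0, 1, 1]
    assert np.digitize(data, bins, right=True).tolist() == [0, 0, 1]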
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
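# Hedged pandas-only sketch of why the test above typecasts to float64
# (illustrative `_example_` helper, not collected by pytest): a list of None
# values infers `object` dtype, while NaN values infer float64, so the
# explicit dtype keeps the expected Series comparable with cuDF's result.
def _example_none_vs_nan_dtype():
    assert pd.Series([None, None]).dtype == np.dtype("object")
    assert pd.Series([np.nan, np.nan]).dtype == np.dtype("float64")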
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
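# Hedged pandas-only illustration of the TODO above (illustrative `_example_`
# helper, not collected by pytest): reindexing an int64 Series with a new
# label introduces NaN, which has no integer representation, so pandas
# upcasts the result to float64.
def _example_pandas_reindex_upcast():
    s = pd.Series([1, 2, 3], dtype="int64")
    assert s.reindex([0, 1, 2, 3]).dtype == np.dtype("float64")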
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # ignore_index is not passed to pandas sort_index (it was only added in
    # pandas v1.0); it is emulated below with reset_index
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
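# Hedged PyArrow-only sketch of the from_pandas flag used above (illustrative
# `_example_` helper, not collected by pytest): from_pandas=True treats NaN
# as null, while from_pandas=False keeps it as an ordinary float value.
def _example_pa_from_pandas_nan():
    assert pa.array([1.0, np.nan], from_pandas=True).null_count == 1
    assert pa.array([1.0, np.nan], from_pandas=False).null_count == 0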
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for some reason,
    # so it is replaced with `False` before comparing
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
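# Hedged pandas-only sketch of the replace([None], False) preprocessing above
# (illustrative `_example_` helper, not collected by pytest): substituting
# False for None removes the object-dtype missing values up front, so the
# subsequent .all() comparison against cuDF is well defined.
def _example_none_replaced_before_all():
    pdata = pd.Series([True, None, False]).replace([None], False)
    assert pdata.tolist() == [True, False, False]
    assert not pdata.all()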
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
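            # Note: "squences" below appears to be a deliberate match for the
            # library's (misspelled) error text, since the escaped string must
            # equal the raised message verbatim.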
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
pytest.xfail(
not PANDAS_GE_110,
"https://github.com/pandas-dev/pandas/issues/34256",
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
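# The astype() tests below exercise DataFrame.astype across numeric, string,
# datetime and categorical source columns, comparing against the equivalent
# pandas result or a precomputed cudf frame.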
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
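# The row-wise reduction tests below encode the null/NaN semantics in their
# expected outputs: with skipna=False a null in any column makes the row
# result null, while np.NaN values kept with nan_as_null=False propagate as
# floating-point NaN rather than as nulls.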
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values
    # on alignment, so we need to typecast to float64 for the equality
    # comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
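        # For categorical results, compare the category codes directly:
        # pandas encodes missing entries as code -1, so the nullable cudf
        # codes are filled with -1 before the comparison.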
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
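# The DataFrame.info() tests below compare against golden strings, so they
# are sensitive to the exact header, column table layout and memory-usage
# figures that info() prints.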
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
        pd.DataFrame([], index=[100]),
from collections import defaultdict
import numpy as np
import pandas as pd
import basty.utils.misc as misc
class AnnotationInfo:
def __init__(self):
self._inactive_annotation = None
self._noise_annotation = None
self._arouse_annotation = None
self._label_to_behavior = None
self._behavior_to_label = None
self._mask_annotated = None
# no annotation by default
self.has_annotation = False
self.use_annotations_to_mask = {}
@property
def noise_annotation(self):
if self._noise_annotation is None:
raise ValueError("'noise_annotation' is not set.")
return self._noise_annotation
@noise_annotation.setter
def noise_annotation(self, value):
if not isinstance(value, str):
raise ValueError("'noise_annotation' must have type 'str'.")
self._noise_annotation = value
@property
def inactive_annotation(self):
if self._inactive_annotation is None:
raise ValueError("'inactive_annotation' is not set.")
return self._inactive_annotation
@inactive_annotation.setter
def inactive_annotation(self, value):
if not isinstance(value, str):
raise ValueError("'inactive_annotation' must have type 'str'.")
self._inactive_annotation = value
@property
def arouse_annotation(self):
if self._arouse_annotation is None:
raise ValueError("'arouse_annotation' is not set.")
return self._arouse_annotation
@arouse_annotation.setter
def arouse_annotation(self, value):
if not isinstance(value, str):
raise ValueError("'arouse_annotation' must have type 'str'.")
self._arouse_annotation = value
@property
def mask_annotated(self):
if self._mask_annotated is None:
raise ValueError("'mask_annotated' is not set.")
return self._mask_annotated
@mask_annotated.setter
def mask_annotated(self, value):
        if not (isinstance(value, np.ndarray) and value.ndim == 1):
raise ValueError("'mask_annotated' must have type '1D numpy.ndarray'.")
self._mask_annotated = value
@property
def label_to_behavior(self):
if self._label_to_behavior is None:
raise ValueError("'label_to_behavior' is not set.")
return self._label_to_behavior
@property
def behavior_to_label(self):
if self._behavior_to_label is None:
raise ValueError("'behavior_to_label' is not set.")
return self._behavior_to_label
@label_to_behavior.setter
def label_to_behavior(self, value):
if not isinstance(value, dict):
raise ValueError("'label_to_behavior' must have type 'dict'.")
self._label_to_behavior = misc.sort_dict(value)
self._behavior_to_label = misc.reverse_dict(self._label_to_behavior)
@behavior_to_label.setter
def behavior_to_label(self, value):
if not isinstance(value, dict):
raise ValueError("'behavior_to_label' must have type 'dict'.")
self._behavior_to_label = misc.sort_dict(value)
self._label_to_behavior = misc.reverse_dict(self._behavior_to_label)
class ExptInfo:
def __init__(self, fps):
self.fps = fps
self.expt_frame_count = None
self._part_to_frame_count = None
self._mask_dormant = None
self._mask_active = None
self._mask_noise = None
self._ftname_to_snapft = None
self._ftname_to_deltaft = None
self._snapft_to_ftname = None
self._deltaft_to_ftname = None
@property
def part_to_frame_count(self):
if self._part_to_frame_count is None:
raise ValueError("'part_to_frame_count' is not set.")
return self._part_to_frame_count
@part_to_frame_count.setter
def part_to_frame_count(self, value):
if not isinstance(value, dict):
raise ValueError("'part_to_frame_count' must have type 'dict'.")
self.expt_frame_count = sum(value.values())
self._part_to_frame_count = value
@property
def mask_noise(self):
if self._mask_noise is None:
raise ValueError("'mask_noise' is not set.")
return self._mask_noise
@mask_noise.setter
def mask_noise(self, value):
        if not (isinstance(value, np.ndarray) and value.ndim == 1):
raise ValueError("'mask_noise' must have type '1D numpy.ndarray'.")
self._mask_noise = value
@property
def mask_dormant(self):
if self._mask_dormant is None:
raise ValueError("'mask_dormant' is not set.")
return self._mask_dormant
@mask_dormant.setter
def mask_dormant(self, value):
        if not (isinstance(value, np.ndarray) and value.ndim == 1):
raise ValueError("'mask_dormant' must have type '1D numpy.ndarray'.")
self._mask_dormant = value
@property
def mask_active(self):
if self._mask_active is None:
raise ValueError("'mask_active' is not set.")
if self._mask_noise is not None:
mask_active = np.logical_and(
self._mask_active, np.logical_not(self._mask_noise)
)
else:
mask_active = self._mask_active
return mask_active
@mask_active.setter
def mask_active(self, value):
        if not (isinstance(value, np.ndarray) and value.ndim == 1):
raise ValueError("'mask_active' must have type '1D numpy.ndarray'.")
self._mask_active = value
@property
def ftname_to_snapft(self):
if self._ftname_to_snapft is None:
raise ValueError("'ftname_to_snapft' is not set.")
return self._ftname_to_snapft
@property
def snapft_to_ftname(self):
if self._snapft_to_ftname is None:
raise ValueError("'snapft_to_ftname' is not set.")
return self._snapft_to_ftname
@property
def ftname_to_deltaft(self):
if self._ftname_to_deltaft is None:
raise ValueError("'ftname_to_deltaft' is not set.")
return self._ftname_to_deltaft
@property
def deltaft_to_ftname(self):
if self._deltaft_to_ftname is None:
raise ValueError("'deltaft_to_ftname' is not set.")
return self._deltaft_to_ftname
@snapft_to_ftname.setter
def snapft_to_ftname(self, value):
if not isinstance(value, dict):
raise ValueError("'snapft_to_ftname' must have type 'dict'.")
self._snapft_to_ftname = misc.sort_dict(value)
self._ftname_to_snapft = misc.reverse_dict(self._snapft_to_ftname)
@ftname_to_snapft.setter
def ftname_to_snapft(self, value):
if not isinstance(value, dict):
raise ValueError("'ftname_to_snapft' must have type 'dict'.")
self._ftname_to_snapft = misc.sort_dict(value)
self._snapft_to_ftname = misc.reverse_dict(self._ftname_to_snapft)
@deltaft_to_ftname.setter
def deltaft_to_ftname(self, value):
if not isinstance(value, dict):
raise ValueError("'deltaft_to_ftname' must have type 'dict'.")
self._deltaft_to_ftname = misc.sort_dict(value)
self._ftname_to_deltaft = misc.reverse_dict(self._deltaft_to_ftname)
@ftname_to_deltaft.setter
def ftname_to_deltaft(self, value):
if not isinstance(value, dict):
raise ValueError("'ftname_to_deltaft' must have type 'dict'.")
self._ftname_to_deltaft = misc.sort_dict(value)
self._deltaft_to_ftname = misc.reverse_dict(self._ftname_to_deltaft)
def _get_sec(self, idx):
sec_total = idx // self.fps
second = sec_total % 60
return sec_total, second
def get_hour_stamp(self, idx):
if idx is np.nan:
stamp = np.nan
else:
sec_total, second = self._get_sec(idx)
minute = (sec_total // 60) % 60
hour = sec_total // 3600
stamp = str(int(hour)) + ":" + str(int(minute)) + ":" + str(int(second))
return stamp
def get_minute_stamp(self, idx):
if idx is np.nan:
stamp = np.nan
else:
sec_total, second = self._get_sec(idx)
minute = sec_total // 60
stamp = str(int(minute)) + ":" + str(int(second))
return stamp
class ExptRecord(ExptInfo, AnnotationInfo):
def __init__(self, name, data_path, expt_path, fps=30):
self.name = name
self.data_path = data_path
self.expt_path = expt_path
ExptInfo.__init__(self, fps)
AnnotationInfo.__init__(self)
def generate_report(self, labels, is_behavior=False, use_time_stamps=False):
assert isinstance(labels, np.ndarray)
assert np.issubdtype(labels.dtype, np.integer)
intvls = misc.cont_intvls(labels)
if is_behavior:
try:
labels = [self.label_to_behavior[lbl] for lbl in labels]
except KeyError:
raise ValueError(
"Given labels are not defined for label to behavior mapping."
)
report_dict = defaultdict(list)
for i in range(intvls.shape[0] - 1):
dur = intvls[i + 1] - intvls[i]
if use_time_stamps:
report_dict["Duration"].append(self.get_minute_stamp(dur))
report_dict["Beginning"].append(self.get_hour_stamp(intvls[i]))
report_dict["End"].append(self.get_hour_stamp(intvls[i + 1]))
else:
report_dict["Duration"].append(dur)
report_dict["Beginning"].append(intvls[i])
report_dict["End"].append(intvls[i + 1])
report_dict["Label"].append(labels[intvls[i]])
        df_report = pd.DataFrame.from_dict(report_dict)
        return df_report
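# Illustrative usage sketch (assumption: the experiment name, paths and label sequence below are
# invented for the example and are not part of the original module).
def _demo_generate_report():
    record = ExptRecord("expt-0", "data/expt-0", "results/expt-0", fps=30)
    labels = np.array([0, 0, 0, 1, 1, 2, 2, 2, 0, 0], dtype=int)
    # With use_time_stamps=True the Duration/Beginning/End columns hold time stamps instead of frame indices.
    return record.generate_report(labels, is_behavior=False, use_time_stamps=True)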
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 21 11:12:24 2018
@author: benjamin
"""
from fastText import train_supervised, load_model
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.multiclass import unique_labels
from tempfile import NamedTemporaryFile
import numpy as np
import pandas as pd
class sk_Fasttext(BaseEstimator, ClassifierMixin):
'''
    This is a scikit wrapper for fastText, and thus the attributes below are almost the same as in the original library
    - Thus most of the text is ripped from: github.com/facebookresearch/fastText/
    - Since this is a wrapper, all line breaks (\n) will be removed in the input data.
- Thus if you want those to count replace them with another character
The following arguments are mandatory:
- None as of now
The following arguments are optional:
-verbose verbosity level [2] - not implemented
    -mkTemp use a temporary file for training [True] - not implemented
    -train_file filename of the training file; nb. only used if mkTemp == False
    The following arguments for the dictionary are optional:
    -minCount minimal number of word occurrences [1]
    -minCountLabel minimal number of label occurrences [0]
-wordNgrams max length of word ngram [1]
-bucket number of buckets [2000000]
-minn min length of char ngram[0]
-maxn max length of char ngram[0]
-t sampling threshold [0.0001]
-label labels prefix [__label__]
    The following arguments for the training are optional:
- lr learning rate[0.1]
- lrUpdateRate change the rate of updates for the learning rate [100]
- dim size of word vectors [100]
- ws size of the context window [5]
- epoch number of epochs [5]
- neg number of negatives sampled
- loss loss function {ns, hs, softmax} [softmax]
- thread number of threads [12]
'''
def __init__(self, label = "__label__", verbose = 2, mkTemp = True,
train_file = "ft_temp", minCount = 1, minCountLabel = 0, wordNgrams = 1,
bucket = 2000000, minn = 0, maxn = 0, t = 0.0001, lr = 0.1,
lrUpdateRate = 100, dim = 100, ws = 5, epoch = 5, neg = 5,
loss = "softmax", thread = 12
):
self.label = label
self.verbose = verbose
self.mkTemp = mkTemp
self.train_file = train_file
self.minCount = minCount
self.minCountLabel = minCountLabel
self.wordNgrams = wordNgrams
self.bucket = bucket
self.minn = minn
self.maxn = maxn
self.t = t
self.lr = lr
self.lrUpdateRate = lrUpdateRate
self.dim = dim
self.ws = ws
self.epoch = epoch
self.neg = neg
self.loss = loss
self.thread = thread
self.type = str
def fit(self, X, y):
# Checks that dimensions are correct
if type(X) == type(pd.DataFrame()):
X = X.values
y = list(y)
self.type = type(y[0])
X = [x[0] for x in X]
# initiated the class variables
self.classes_ = unique_labels(y)
        ### First we need to create the temporary file for training
if self.mkTemp == True:
f = NamedTemporaryFile(mode="w")
else:
f = open(self.train_file, "w")
for i in range(len(X)):
textToWrite = self.label + str(y[i]) + " "
textToWrite += X[i].replace("\n", " ")
f.write(textToWrite + "\n")
self.classifier = train_supervised(f.name, label=self.label, verbose=self.verbose,
minCount = self.minCount, minCountLabel = self.minCountLabel,
wordNgrams = self.wordNgrams, bucket = self.bucket,
minn = self.minn, maxn = self.maxn, t = self.t,
lr = self.lr, lrUpdateRate = self.lrUpdateRate,
dim = self.dim, ws = self.ws, epoch = self.epoch,
neg = self.neg, loss = self.loss, thread = self.thread)
self.X_ = X
self.y_ = y
# Closes and destroys temp file
f.close()
# Return Classifier
return self
def predict(self, X):
        # transform the pandas input to numpy
        if type(X) == type(pd.DataFrame()):
            X = X.values
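# Illustrative usage sketch (assumption: toy data; the fastText Python bindings must be installed
# for fit() to run, and the hyper-parameters below are arbitrary).
def _demo_sk_fasttext():
    X = pd.DataFrame({"text": ["good film", "great movie", "terrible plot", "awful acting"]})
    y = ["pos", "pos", "neg", "neg"]
    clf = sk_Fasttext(epoch=1, dim=10, minCount=1)
    clf.fit(X, y)
    return clf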
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import os
from typing import Tuple
import pandas as pd
import pytest
from unittest.mock import Mock
import sqlalchemy
from edfi_schoology_extractor import usage_analytics_facade
from edfi_schoology_extractor.mapping import usage_analytics as usageMap
from edfi_schoology_extractor.helpers import csv_reader
from edfi_schoology_extractor.helpers import sync
INPUT_DIRECTORY = "./input"
def _setup_empty_filesystem(fs):
fs.path_separator = "/"
fs.is_windows_fs = False
fs.is_macos = False
fs.create_dir(INPUT_DIRECTORY)
def _setup_filesystem(fs):
fs.path_separator = "/"
fs.is_windows_fs = False
fs.is_macos = False
fs.create_dir(INPUT_DIRECTORY)
contents = """role_name,user_building_id,user_building_name,username,email,schoology_user_id,unique_user_id,action_type,item_type,item_id,item_name,course_name,course_code,section_name,last_event_timestamp,event_count,role_id,user_building_code,last_name,first_name,device_type,item_building_id,item_building_name,item_building_code,item_parent_type,group_id,group_name,course_id,section_id,section_school_code,section_code,month,date,timestamp,time spent (seconds)
Student,2908525646,Ed-Fi Alliance - Grand Bend High school,kyle.hughes,<EMAIL>,100032891,604874,CREATE,SESSION,,,,,,2020-11-04 17:28:43.097,1,796380,,Hughes,Kyle,WEB,2908525646,Ed-Fi Alliance - Grand Bend High school,,USER,,,,,,,11,11/04/2020,17:28,
"""
fs.create_file(os.path.join(INPUT_DIRECTORY, "input.csv"), contents=contents)
def describe_when_getting_system_activities():
def describe_given_no_usage_analytics_provided():
@pytest.fixture
def system(fs):
_setup_empty_filesystem(fs)
            csv_reader.load_data_frame = Mock(return_value=pd.DataFrame(["one"]))
"""
Integrated Label Preparation Code
Created on 4/25/2019
@author: RH
"""
#CPTAC initial prep
import pandas as pd
imlist = pd.read_excel('../S043_CPTAC_UCEC_Discovery_Cohort_Study_Specimens_r1_Sept2018.xlsx', header=4)
imlist = imlist[imlist['Group'] == 'Tumor ']
cllist = pd.read_csv('../UCEC_V2.1/waffles_updated.txt', sep='\t', header = 0)
cllist = cllist[cllist['Proteomics_Tumor_Normal'] == 'Tumor']
joined = pd.merge(imlist, cllist, how='inner', on=['Participant_ID'])
joined.to_csv('../joined_PID.csv', index = False)
#CPTAC prep
import pandas as pd
import shutil
import os
import csv
def flatten(l, a):
for i in l:
if isinstance(i, list):
flatten(i, a)
else:
a.append(i)
return a
# Get all images in the root directory
def image_ids_in(root_dir, ignore=['.DS_Store', 'dict.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
dirname = id.split('_')[-2]
ids.append((id, dirname))
return ids
PID = pd.read_csv("../joined_PID.csv", header = 0)
temp = []
ls = []
for idx, row in PID.iterrows():
if "," in row["Parent Sample ID(s)"]:
m = row["Parent Sample ID(s)"].split(',')
for x in m:
w = row
ls.append(x)
temp.append(w)
PID = PID.drop(idx)
temp = pd.DataFrame(temp)
temp["Parent Sample ID(s)"] = ls
PID = PID.append(temp, ignore_index=True)
PID = PID.sort_values(["Parent Sample ID(s)"], ascending=1)
PID.to_csv("../new_joined_PID.csv", header = True, index = False)
PID = pd.read_csv("../new_joined_PID.csv", header = 0)
ref_list = PID["Parent Sample ID(s)"].tolist()
imids = image_ids_in('../CPTAC_img')
inlist = []
outlist = []
reverse_inlist = []
try:
os.mkdir('../CPTAC_img/inlist')
except FileExistsError:
pass
try:
os.mkdir('../CPTAC_img/outlist')
except FileExistsError:
pass
for im in imids:
if im[1] in ref_list:
inlist.append(im[0])
reverse_inlist.append(im[1])
shutil.move('../CPTAC_img/'+str(im[0]), '../CPTAC_img/inlist/'+str(im[0]))
else:
outlist.append(im[0])
shutil.move('../CPTAC_img/' + str(im[0]), '../CPTAC_img/outlist/' + str(im[0]))
csvfile = "../CPTAC_inlist.csv"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in inlist:
writer.writerow([val])
csvfile = "../CPTAC_outlist.csv"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in outlist:
writer.writerow([val])
filtered_PID = PID[PID["Parent Sample ID(s)"].isin(reverse_inlist)]
tpdict = {'CN-High': 'Serous-like', 'CN-Low': 'Endometrioid', 'MSI-H': 'MSI', 'POLE': 'POLE', 'Other': 'Other'}
a = filtered_PID['TCGA_subtype']
filtered_PID['Subtype'] = a
filtered_PID.Subtype = filtered_PID.Subtype.replace(tpdict)
filtered_PID = filtered_PID[filtered_PID.Subtype != 'Other']
filtered_PID.to_csv("../filtered_joined_PID.csv", header=True, index=False)
#TCGA prep
import pandas as pd
def flatten(l, a):
for i in l:
if isinstance(i, list):
flatten(i, a)
else:
a.append(i)
return a
image_meta = pd.read_csv('../TCGA_Image_meta.tsv', sep='\t', header=0)
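# Illustrative sketch of the flatten() helper defined above (toy nested list, not from the original pipeline).
def _demo_flatten():
    nested = [1, [2, [3, 4]], 5]
    return flatten(nested, [])  # -> [1, 2, 3, 4, 5]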
import streamlit as st
import pandas as pd
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import NearestNeighbors
import random
import missingno as msno
import ppscore as pps
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
import base64
from io import BytesIO
pd.set_option('display.max_columns', 500)
@st.cache
def readcsv(csv):
df=pd.read_csv(csv)
return df
def load_data():
DATA_URL = ('https://raw.githubusercontent.com/guireis1/Codenation-Final-Project/master/estaticos_portfolio1.csv')
data = pd.read_csv(DATA_URL)
return data
def head(dataframe):
if len(dataframe) > 1000:
lenght = 1000
else:
lenght = len(dataframe)
slider = st.slider('Linhas exibidas:', 10, lenght)
st.dataframe(dataframe.head(slider))
def vis(data):
for i in data.columns:
if i == 'setor':
sns.set(style="whitegrid")
plt.figure(figsize=(20,10))
sns.countplot(x="setor", data=data, palette="Reds_r",saturation=0.5)
plt.title('Contagem dos Setores',fontsize=20)
plt.xlabel('')
plt.ylabel('')
st.pyplot()
if i == 'natureza_juridica_macro':
sns.set(style="whitegrid")
plt.figure(figsize=(20,10))
sns.countplot(x="natureza_juridica_macro", data=data, palette="Reds_r",saturation=0.5)
plt.title('Contagem da natureza jurídica',fontsize=20)
plt.xlabel('')
plt.ylabel('')
st.pyplot()
if i == 'de_faixa_faturamento_estimado_grupo':
sns.set(style="whitegrid")
plt.figure(figsize=(20,20))
sns.countplot(y="de_faixa_faturamento_estimado_grupo",hue='setor', data=data, palette="Reds_r",saturation=0.5)
plt.title('Contagem do faturamento por setor',fontsize=20)
plt.xlabel('')
plt.ylabel('')
st.pyplot()
if i == 'nm_meso_regiao':
sns.set(style="whitegrid")
plt.figure(figsize=(20,20))
sns.countplot(y="nm_meso_regiao", data=data, palette="Reds_r",saturation=0.5)
plt.title('Contagem Meso Região',fontsize=20)
plt.xlabel('')
plt.ylabel('')
st.pyplot()
@st.cache
def descritiva(dataframe):
desc = dataframe.describe().T
desc['column'] = desc.index
exploratory = pd.DataFrame()
exploratory['NaN'] = dataframe.isnull().sum().values
exploratory['NaN %'] = 100 * (dataframe.isnull().sum().values / len(dataframe))
exploratory['NaN %'] = exploratory['NaN %'].apply(lambda x: str(round(x,2)) + " %")
exploratory['column'] = dataframe.columns
exploratory['dtype'] = dataframe.dtypes.values
exploratory = exploratory.merge(desc, on='column', how='left')
exploratory.loc[exploratory['dtype'] == 'object', 'count'] = len(dataframe) - exploratory['NaN']
exploratory.set_index('column', inplace=True)
return exploratory
def missing(data,nome):
#plt.figure(figsize=(10,15))
msno.bar(data,sort='descending')
plt.title(nome,fontsize=30)
st.pyplot()
def missing_dendo(data,nome):
msno.dendrogram(data)
plt.title(nome,fontsize=30)
st.pyplot()
def geoloc(data,coord):
coordenadas = []
null_count= 0
for local in data['nm_micro_regiao']:
coords=coord[coord['nome']==local][['latitude','longitude']]
if not coords.empty:
coordenadas.append([coords['latitude'].values[0]-random.uniform(0,0.25),
coords['longitude'].values[0]-random.uniform(0,0.25)])
else:
null_count += 1
print(null_count)
return coordenadas
# st.map(coordenadas)
#for mark in coordenadas:
# folium.Marker([mark[0],mark[1]],icon=folium.Icon(icon='exclamation',color='darkred',prefix='fa')).add_to(m)
# print(null_count_port)
def recommend(port_pre,slider_nn,market_col_select_scaled,market):
valor_nn = slider_nn
nn= NearestNeighbors(n_neighbors=valor_nn,metric='cosine')
nn.fit(market_col_select_scaled)
nn_port_list = {}
for row in range(port_pre.shape[0]):
nn_port_list[row] = nn.kneighbors(port_pre.iloc[[row]].values)
nn_size = len(nn_port_list)
nn_num = len(nn_port_list[0][1][0])
nn_index = nn_port_list[0][1][0]
nn_distance = nn_port_list[0][0][0]
np.delete(nn_index, [0,1])
np.delete(nn_distance, [0,1])
for i in range(1,nn_size):
nn_index = np.concatenate((nn_index,nn_port_list[i][1][0]),axis=None)
nn_distance = np.concatenate((nn_distance,nn_port_list[i][0][0]),axis=None)
if len(nn_index) != nn_size*nn_num:
print ('Erro')
id_origin = {}
for idx,ind in zip(nn_index,range(len(nn_index))):
id_origin[ind] = (port_pre.iloc[int(ind/valor_nn)].name , market.iloc[idx].name, (nn_distance[ind]))
recommend = pd.DataFrame.from_dict(id_origin,orient='index')
recommend.rename(columns={0:'id_origin',1:'id',2:'distance'},inplace=True)
    recommend=recommend[recommend['id'].isin(port_pre.index)==0]  # drop rows whose id is already in the portfolio (conflicts)
    recommend.set_index('id',inplace=True)
    suggestion = recommend.merge(market, how='left', left_index=True,right_index=True)  ## join with the market data
    suggestion = suggestion.loc[~suggestion.index.duplicated(keep='first')]  ## drop duplicated suggestions
return suggestion
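# Minimal standalone sketch of the cosine k-NN matching idea used in recommend() above.
# The toy feature arrays are assumptions for illustration only.
def _demo_cosine_knn():
    market_features = np.array([[0.0, 1.0], [1.0, 0.0], [0.9, 0.1], [0.1, 0.9]])
    client_features = np.array([[0.95, 0.05]])
    nn = NearestNeighbors(n_neighbors=2, metric='cosine')
    nn.fit(market_features)
    distances, indices = nn.kneighbors(client_features)
    return distances, indices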
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
return f'<a href="data:file/csv;base64,{b64}" download="recomendacao.csv">Download csv file</a>'
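# Typical usage sketch (assumption): st.markdown(get_table_download_link(suggestion), unsafe_allow_html=True)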
#
def main():
st.image('img/gui_logo.jpeg', use_column_width=True)
st.header('Bem vindo!')
st.subheader('**Você está no sistema de recomendação de clientes**')
st.markdown('O sistema recomendará novos clientes baseado em comparações com os seus atuais clientes de forma customizada a partir das características desejadas.')
st.markdown('### Precisamos que você nos forneça o **portifólio de seus clientes!**')
st.markdown(' *Obs.: Caso você não tenha um portifólio para usar, escolha um [desses](https://github.com/guireis1/Codenation-Final-Project/tree/master/data). *')
file3= st.file_uploader('Upload clientes.csv',type='csv')
if file3 is not None:
market_pre = pd.read_csv('data/data_preprocess.csv')
market = pd.read_csv('data/market.csv')
#market = pd.DataFrame(readcsv(file2))
#market= pd.read_csv(file2)
#market_pre = pd.DataFrame(readcsv(file1))
#market_pre = pd.read_csv(file1)
port = pd.DataFrame(readcsv(file3))
st.text('Loading data...done!')
        # Start the data processing
#market = pd.read_csv('market.csv')
#market_pre = pd.read_csv('data_preprocess.csv')
#port = pd.read_csv('data/estaticos_portfolio1.csv')
market_pre.set_index('id',inplace=True)
market.set_index(market_pre.index,inplace=True)
market.drop('Unnamed: 0',axis=1,inplace=True)
port= port.set_index('id')
port.drop(port.columns,axis=1,inplace=True)
port_market = market.merge(port,how='right',left_index=True,right_index=True)
port_market_pre = market_pre.merge(port,how='right',left_index=True,right_index=True)
st.markdown('DataFrame do Portofólio:')
head(port_market)
        # All datasets are ready
#st.sidebar.image(st.image('img/logo.png', use_column_width=True))
st.sidebar.header('Opções de análise do Portifólio:')
sidemulti = st.sidebar.multiselect('Escolha: ',('Visualização','Descritiva','Geolocalização'))
if ('Visualização' in sidemulti):
st.markdown('## **Visualização do Portifólio**')
st.markdown('Perfil de clientes considerando features importantes')
vis(port_market)
st.markdown('*Para melhor visualização clique na imagem*')
if ('Descritiva' in sidemulti):
st.markdown('## **Análise Descritiva do Portifólio**')
st.dataframe(descritiva(port_market))
missing(port_market,'Visualização dos nulos do Portifólio')
missing_dendo(port_market,'Dendograma dos nulos do Portifólio')
st.markdown('*Para melhor visualização clique na imagem*')
if ('Geolocalização' in sidemulti):
coordenadas = pd.read_csv('https://raw.githubusercontent.com/guireis1/Codenation-Final-Project/master/data/coordenadas')
coordenadas.drop('Unnamed: 0',axis=1,inplace=True)
st.markdown('## **Geolocalização do Portifólio**')
st.markdown('Localização das empresas contidas no portifólio')
cord_port = geoloc(port_market,coordenadas)
cord_port_df=pd.DataFrame(cord_port,columns=('lat','lon'))
st.map(cord_port_df)
st.sidebar.header('Opções de análise do mercado:')
sidemulti_market = st.sidebar.multiselect('Escolha: ',('Visualização','Descritiva','Correlação','Análise dos Nulos','Colunas excluídas'))
if ('Visualização' in sidemulti_market):
st.markdown('## **Visualização do Mercado**')
vis(market)
st.markdown('*Para melhor visualização clique na imagem*')
if ('Descritiva' in sidemulti_market):
st.markdown('## **Análise Descritiva do Mercado**')
st.dataframe(descritiva(market))
#missing(market,'Visualização dos nulos')
#missing_dendo(market,'Dendograma nulos')
if ('Correlação' in sidemulti_market):
st.markdown('## **Correlações do Mercado**')
st.markdown('Correlação padrão')
st.image('img/corr_matrix.png', use_column_width=True)
st.markdown('Correlação usando PPS')
st.image('img/corr_pps.png', use_column_width=True)
if ('Análise dos Nulos' in sidemulti_market):
st.markdown('## **Análise dos nulos **')
st.markdown('### **Colunas Numéricas:**')
st.image('img/valores20.png', use_column_width=True)
st.image('img/valores60.png', use_column_width=True)
st.image('img/valores80.png', use_column_width=True)
st.image('img/dendo_90.png', use_column_width=True)
st.image('img/dendo100.png', use_column_width=True)
st.markdown('### **Colunas Categoricas:**')
st.image('img/valores_nulos.png', use_column_width=True)
st.image('img/dendo_cat.png', use_column_width=True)
if ('Colunas excluídas' in sidemulti_market):
col_excluidas=[ 'sg_uf', 'idade_emp_cat', 'fl_me', 'fl_sa', 'fl_epp', 'fl_ltda', 'dt_situacao', 'fl_st_especial', 'nm_divisao', 'nm_segmento', 'fl_spa',
'vl_total_tancagem', 'vl_total_veiculos_antt', 'fl_optante_simples', 'qt_art', 'vl_total_veiculos_pesados_grupo', 'vl_total_veiculos_leves_grupo', 'vl_total_tancagem_grupo',
'vl_total_veiculos_antt_grupo', 'vl_potenc_cons_oleo_gas', 'fl_optante_simei', 'sg_uf_matriz', 'de_saude_rescencia', 'nu_meses_rescencia', 'de_indicador_telefone',
'fl_simples_irregular', 'vl_frota', 'qt_socios_pf', 'qt_socios_pj', 'idade_maxima_socios', 'idade_minima_socios', 'qt_socios_st_regular', 'qt_socios_st_suspensa',
'qt_socios_masculino', 'qt_socios_feminino', 'qt_socios_pep', 'qt_alteracao_socio_total', 'qt_alteracao_socio_90d', 'qt_alteracao_socio_180d', 'qt_alteracao_socio_365d',
'qt_socios_pj_ativos', 'qt_socios_pj_nulos', 'qt_socios_pj_baixados', 'qt_socios_pj_suspensos', 'qt_socios_pj_inaptos', 'vl_idade_media_socios_pj', 'vl_idade_maxima_socios_pj',
'vl_idade_minima_socios_pj', 'qt_coligados', 'qt_socios_coligados', 'qt_coligados_matriz', 'qt_coligados_ativo', 'qt_coligados_baixada', 'qt_coligados_inapta',
'qt_coligados_suspensa', 'qt_coligados_nula', 'idade_media_coligadas', 'idade_maxima_coligadas', 'idade_minima_coligadas', 'coligada_mais_nova_ativa',
'coligada_mais_antiga_ativa', 'idade_media_coligadas_ativas', 'coligada_mais_nova_baixada', 'coligada_mais_antiga_baixada', 'idade_media_coligadas_baixadas',
'qt_coligados_sa', 'qt_coligados_me', 'qt_coligados_mei', 'qt_coligados_ltda', 'qt_coligados_epp', 'qt_coligados_norte', 'qt_coligados_sul', 'qt_coligados_nordeste',
'qt_coligados_centro', 'qt_coligados_sudeste', 'qt_coligados_exterior', 'qt_ufs_coligados', 'qt_regioes_coligados', 'qt_ramos_coligados', 'qt_coligados_industria',
'qt_coligados_agropecuaria', 'qt_coligados_comercio', 'qt_coligados_serviço', 'qt_coligados_ccivil', 'qt_funcionarios_coligados',
'qt_funcionarios_coligados_gp', 'media_funcionarios_coligados_gp', 'max_funcionarios_coligados_gp', 'min_funcionarios_coligados_gp', 'vl_folha_coligados', 'media_vl_folha_coligados', 'max_vl_folha_coligados', 'min_vl_folha_coligados', 'vl_folha_coligados_gp', 'media_vl_folha_coligados_gp',
'max_vl_folha_coligados_gp', 'min_vl_folha_coligados_gp', 'faturamento_est_coligados', 'media_faturamento_est_coligados', 'max_faturamento_est_coligados', 'min_faturamento_est_coligados',
'faturamento_est_coligados_gp', 'media_faturamento_est_coligados_gp', 'max_faturamento_est_coligados_gp', 'min_faturamento_est_coligados_gp', 'total_filiais_coligados', 'media_filiais_coligados', 'max_filiais_coligados',
'min_filiais_coligados', 'qt_coligados_atividade_alto', 'qt_coligados_atividade_medio', 'qt_coligados_atividade_baixo', 'qt_coligados_atividade_mt_baixo', 'qt_coligados_atividade_inativo',
'qt_coligadas', 'sum_faturamento_estimado_coligadas', 'de_faixa_faturamento_estimado', 'vl_faturamento_estimado_aux', 'vl_faturamento_estimado_grupo_aux', 'qt_ex_funcionarios',
'qt_funcionarios_grupo', 'percent_func_genero_masc', 'percent_func_genero_fem', 'idade_ate_18', 'idade_de_19_a_23', 'idade_de_24_a_28', 'idade_de_29_a_33',
'idade_de_34_a_38', 'idade_de_39_a_43', 'idade_de_44_a_48', 'idade_de_49_a_53', 'idade_de_54_a_58', 'idade_acima_de_58', 'grau_instrucao_macro_analfabeto',
'grau_instrucao_macro_escolaridade_fundamental', 'grau_instrucao_macro_escolaridade_media', 'grau_instrucao_macro_escolaridade_superior', 'grau_instrucao_macro_desconhecido',
'total', 'meses_ultima_contratacaco', 'qt_admitidos_12meses', 'qt_desligados_12meses', 'qt_desligados', 'qt_admitidos', 'media_meses_servicos_all', 'max_meses_servicos_all', 'min_meses_servicos_all',
'media_meses_servicos', 'max_meses_servicos', 'min_meses_servicos', 'qt_funcionarios_12meses', 'qt_funcionarios_24meses', 'tx_crescimento_12meses', 'tx_crescimento_24meses']
st.markdown('## **Colunas excluídas**')
st.markdown('Decidimos não utiliza-las por quantidade de linhas não preenchidas, grandes correlações com outrar variáveis, pouca importância para o modelo ou redundância!')
st.markdown('**São elas:**')
st.write(col_excluidas)
st.sidebar.header('Sistema de recomendação')
start_model = st.sidebar.checkbox('Aperte para começarmos a modelagem do sistema!')
st.sidebar.markdown('**Desenvolvido por,**')
st.sidebar.markdown('*<NAME>*')
st.sidebar.markdown('[LinkedIn](https://www.linkedin.com/in/guilherme-reis-2862ab153/)')
st.sidebar.markdown('[GitHub](https://github.com/guireis1/)')
if start_model:
st.header('**Modelagem**')
st.subheader('**Primeiro selecione as features que gostaria de usar**')
st.markdown('*Essas serão as colunas que serão utilizadas no sistema de recomendação!*')
st.markdown('**Colunas que recomendamos:**')
col_select=[]
ramo = st.checkbox('de_ramo')
idade = st.checkbox('idade_emp_cat')
meso = st.checkbox('nm_meso_regiao')
juridica = st.checkbox('natureza_juridica_macro')
faturamento = st.checkbox('de_faixa_faturamento_estimado_grupo')
filiais = st.checkbox('qt_filiais')
mei = st.checkbox('fl_mei')
rm = st.checkbox('fl_rm')
st.markdown('**Colunas opcionais:**')
setor = st.checkbox('setor')
rotatividade = st.checkbox('tx_rotatividade')
idade_socios = st.checkbox('idade_media_socios')
socios = st.checkbox('qt_socios')
renda = st.checkbox('empsetorcensitariofaixarendapopulacao')
leve = st.checkbox('vl_total_veiculos_leves_grupo')
pesado = st.checkbox('vl_total_veiculos_pesados_grupo')
iss = st.checkbox('fl_passivel_iss')
atividade = st.checkbox('de_nivel_atividade')
saude = st.checkbox('de_saude_tributaria')
veiculo = st.checkbox('fl_veiculo')
antt = st.checkbox('fl_antt')
telefone = st.checkbox('fl_telefone')
email = st.checkbox('fl_email')
matriz = st.checkbox('fl_matriz')
if ramo:
col_select.append('de_ramo')
if idade:
col_select.append('idade_emp_cat')
if meso:
col_select.append('nm_meso_regiao')
meso_ohe=pd.get_dummies(market_pre['nm_meso_regiao'],drop_first=True)
if faturamento:
col_select.append('de_faixa_faturamento_estimado_grupo')
if juridica:
col_select.append('natureza_juridica_macro')
                juridico_ohe=pd.get_dummies(market_pre['natureza_juridica_macro'],drop_first=True)
import json
import pandas as pd
from tqdm import tqdm
from curami.commons import file_utils
from curami.preprocess.clean import AttributeCleaner
def generate_features_file(from_file_no, to_file_no):
    pd_unique_attributes = pd.read_csv(file_utils.unique_attributes_file_final)
import numpy as _np
from scipy.stats import sem as _sem
import pandas as _pd
import matplotlib.pyplot as _plt
from nicepy import format_fig as _ff, format_ax as _fa
class TofData:
"""
General class for TOF data
"""
def __init__(self, filename, params, norm=True, noise_range=(3, 8), bkg_range=(3, 8), fluor=True, factor=0.92152588, offset=-0.36290086):
"""
:param filename:
:param params:
:param norm:
:param noise_range:
:param bkg_range:
:param fluor:
:param factor:
:param offset:
"""
self.filename = filename
self.idx = False
self.norm = norm
self.noise_range = noise_range
self.bkg_range = bkg_range
self.factor = factor
self.offset = offset
self.fluor = fluor
self._get_data(filename)
self._subtract_bkg()
self._get_noise()
self._get_params(filename, params)
self.peaks = None
def _get_data(self, filename):
"""
:param filename:
:return:
"""
dat = list(_np.loadtxt(filename))
fluor = dat.pop()
if self.fluor is False:
fluor = 1
center = int(len(dat) / 2)
time = _np.array([s for s in dat[:center]])
mass = self._time_to_mass(time, self.factor, self.offset)
raw = _np.array([-i / fluor for i in dat[center:]]) / fluor
raw = _pd.DataFrame({'Time': time, 'Mass': mass, 'Volts': raw})
if self.norm is True:
tot = raw['Volts'].sum()
raw['Volts'] = raw['Volts']/tot
self.raw = raw
def _subtract_bkg(self):
"""
:return:
"""
temp = self._select_range('Mass', self.bkg_range[0], self.bkg_range[1])['Volts']
m = temp.mean()
self.raw['Volts'] = self.raw['Volts'] - m
def _get_noise(self):
"""
:return:
"""
temp = self._select_range('Mass', self.noise_range[0], self.noise_range[1])['Volts']
n = temp.std()
self.noise = n
def _get_params(self, filename, params):
"""
:param filename:
:param params:
:return:
"""
listed = filename.replace('.txt', '').split('_')
temp = {key: listed[val] for key, val in params.items()}
self.params = {}
for key, val in temp.items():
if key.lower() == 'version':
val = val.lower()
val = val.replace('v', '')
else:
pass
if '.' in val or 'e' in val:
try:
val = float(val)
except ValueError:
pass
else:
try:
val = int(val)
except ValueError:
val = val.lower()
self.params[key] = val
self.params = _pd.Series(self.params)
def _select_range(self, column, lower, upper):
"""
Selects part of data that is between values upper and lower in column
        :param column: column name used to bound the selection
:param lower: lower value in column
:param upper: upper value in column
:return: parsed data frame
"""
temp = self.raw[(self.raw[column] <= upper) & (self.raw[column] >= lower)]
return temp
def _get_closest(self, column, value):
"""
:param column:
:param value:
:return:
"""
temp = self.raw.loc[(self.raw[column] - value).abs().idxmin()]
return temp
def _get_range(self, mass, pk_range=(-80, 80)):
"""
:param mass:
:param pk_range:
:return:
"""
idx = self._get_closest('Mass', mass).name
lower = idx + pk_range[0]
if lower < 0:
lower = 0
upper = idx + pk_range[1]
if upper > self.raw.index.max():
upper = self.raw.index.max()
return lower, upper
def _get_peak(self, lower, upper):
"""
:param lower:
:param upper:
:return:
"""
temp = self.raw.loc[range(lower, upper + 1)]
p = temp['Volts'].sum()
if p < self.noise:
p = 0
return p
def get_peaks(self, masses, **kwargs):
"""
:param masses:
:param kwargs:
:return:
"""
self.peaks = {}
self.idx = {}
for key, val in masses.items():
lower, upper = self._get_range(val, **kwargs)
self.peaks[key] = self._get_peak(lower, upper)
self.idx[key] = (lower, val, upper)
self.peaks = _pd.Series(self.peaks)
self.idx = _pd.Series(self.idx)
@staticmethod
def _time_to_mass(time, factor, offset):
mass = [(t - factor) ** 2 + offset for t in time]
return mass
def show(self, x='Mass', shade=True, **kwargs):
"""
:param x:
:param shade:
:param kwargs:
:return:
"""
fig, ax = _plt.subplots()
title = {key: val for key, val in self.params.items()}
self.raw.plot.line(x=x, y='Volts', title='%s' % title, color='black', ax=ax, **kwargs)
if shade is True:
if self.idx is not False:
for key, val in self.idx.items():
idx_range = self.raw.loc[range(val[0], val[2] + 1)]
ax.fill_between(idx_range[x], idx_range['Volts'], label=key, alpha=0.5)
ax.legend(loc=0)
else:
pass
else:
pass
ax.legend(loc=0)
return fig, ax
class TofSet:
def __init__(self, filenames, params, **kwargs):
"""
:param filenames:
:param params:
:param kwargs:
"""
self.filenames = filenames
self.params = params
self._get_tofs(**kwargs)
self._get_raw()
self.idx = False
self.peaks = None
def _get_tofs(self, **kwargs):
"""
:param kwargs:
:return:
"""
tof_list = []
for filename in self.filenames:
t = TofData(filename, self.params, **kwargs)
tof_list.append(t)
tof_objs = []
for t in tof_list:
temp = t.params.copy()
temp['tof'] = t
tof_objs.append(temp)
temp = _pd.DataFrame(tof_objs)
temp.set_index(list(self.params.keys()), inplace=True)
self.tof_objs = temp.sort_index()
def _get_raw(self):
"""
:return:
"""
# temp = self.tof_objs.copy()
# temp['raw'] = [t.raw for t in temp['tof']]
# self.raw = temp.drop('tof', axis=1)
temp = self.tof_objs.copy()
raw = []
for tof in temp['tof']:
t = tof.raw
for key, val in tof.params.items():
t[key] = val
raw.append(t)
self.raw = _pd.concat(raw)
self.raw.set_index(list(self.params.keys()), inplace=True)
self.raw.sort_index(inplace=True)
def _get_tof_peaks(self, masses, **kwargs):
"""
:param masses:
:param kwargs:
:return:
"""
for t in self.tof_objs['tof']:
t.get_peaks(masses, **kwargs)
self.idx = t.idx
def get_peaks(self, masses, **kwargs):
"""
:param masses:
:param kwargs:
:return:
"""
self._get_tof_peaks(masses, **kwargs)
temp_list = []
for t in self.tof_objs['tof']:
temp = _pd.concat([t.peaks, t.params])
temp_list.append(temp)
temp = _pd.concat(temp_list, axis=1)
self.peaks = temp.transpose()
self.peaks.set_index(list(self.params.keys()), inplace=True)
self.peaks.sort_index(inplace=True)
# self.peaks['total'] = self.peaks.sum(axis=1)
def get_raw_means(self, levels=None):
if levels is None:
levels = []
for key in ['version', 'delay']:
if key in self.params.keys():
levels.append(key)
else:
pass
self.levels = levels
grouped = self.tof_objs.groupby(levels)
temp_mean = []
temp_error = []
for indices, group in grouped:
times = _np.mean([tof.raw['Time'] for tof in group['tof']], axis=0)
masses = _np.mean([tof.raw['Mass'] for tof in group['tof']], axis=0)
volts = _np.mean([tof.raw['Volts'] for tof in group['tof']], axis=0)
errors = _sem([tof.raw['Volts'] for tof in group['tof']], axis=0)
df_mean = _pd.DataFrame({'Time': times, 'Mass': masses, 'Volts': volts})
            df_error = _pd.DataFrame({'Time': times, 'Mass': masses, 'Volts': errors})
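# Illustrative usage sketch (assumption: the filename pattern and params mapping below are invented
# for the example; a real data file is required for TofData to load).
def _demo_tofdata():
    params = {'version': 0, 'delay': 1}
    tof = TofData('v1_0.5.txt', params)
    tof.get_peaks({'K+': 39, 'Ca+': 40})
    return tof.peaks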
#!/usr/bin/env python
"""
Name: Filter and Convert Fetched Data from OpenSky Network
Author: <NAME>
Copyright: University of Liverpool © 2021
License: MIT
Version: 1.0
Status: Operational
Description: The source code performs a filter and conversion of data ready for analysis.
"""
import sys, csv, pytz
import pandas as pd
import tkinter as tk
from tkinter import filedialog
import time
import numpy as np
import pyproj
import fileinput
root = tk.Tk()
root.withdraw()
v = tk.StringVar(root)
class data():
def filter():
file_path = filedialog.askopenfilename()
v.set(file_path)
rows = []
with open(file_path, 'r') as f:
for row in csv.reader(f):
# Check each row then strip whitespaces and blanks
row = [col.strip() for col in row]
rows.append(row)
df = pd.DataFrame(rows)
df.columns = df.iloc[0]
df = df[1:]
df = df.drop(df.columns[[10, 13, 14, 15]], axis = 1, inplace=False)
df = df.drop_duplicates(keep=False)
df = df.set_index('time', inplace=False)
df.to_csv('test01.csv')
df = pd.read_csv('test01.csv')
df = df[df['onground'] == False]
df = df[df['alert'] == False]
df = df[df['baroaltitude'] >= 2590]
#df['time'] = pd.to_datetime(df['time'],unit='s').dt.time
df['hour'] = pd.to_datetime(df['hour'],unit='s')
df = df.set_index('time', inplace=False)
df.to_csv('test01.csv')
filter_msg = print("\n Noise Data is removed successfully")
return filter_msg
def convert():
# Load the data and convert lat/lon to Eastings/Northings (UK zone 30)
df = pd.read_csv('test01.csv')
p = pyproj.Proj(proj='utm', zone=30, ellps='WGS84', preserve_units=False)
x,y = p(df["lon"].values, df["lat"].values)
# Create columns for x and y and store the generated data
df = df.assign(eastings = x, northings = y)
# Convert velocity from [m/s] to [knots], vertical speed to [FT/s] and altitude from [m] to [FT]
df['velocity'] = round(df['velocity']*1.943844)
df['vertrate'] = round(df['vertrate']/0.3048)
df['baroaltitude'] = round(df['baroaltitude']*3.28084)
df['heading'] = round(df['heading']) # get constant values for direction
df['icao24'] = df['icao24'].str.upper()
# Classifying in their Airspace classes
conditions = [
(18050 <= df['baroaltitude']) & (df['baroaltitude'] <= 60000), # Class A
(8550 <= df['baroaltitude']) & (df['baroaltitude'] <= 18000), # Class C
(8500 <= df['baroaltitude']) & (df['baroaltitude'] <= 12500), # Class E
]
classes = ['Class A', 'Class C', 'Class E']
df['airspace'] = np.select(conditions, classes)
df = df.set_index('time', inplace=False)
df = df.sort_index()
df.to_csv('test01.csv') # save the file
        # Load data to save flights as their callsign in separate csv files
        df = pd.read_csv('test01.csv')
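# Typical invocation sketch (assumption, not in the original script): call data.filter() to clean the
# fetched CSV, then data.convert() to add UTM coordinates, unit conversions and airspace classes.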
# Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as pltcolors
import matplotlib.cm as cmx
import click
from ciml import listener
from ciml import gather_results
import datetime
import itertools
import os
import queue
import re
import sys
import warnings
warnings.filterwarnings("ignore")
try:
from ciml import nn_trainer
from ciml import svm_trainer
from ciml import tf_trainer
except ImportError:
print("Warning: could not import CIML trainers")
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
try:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.training import proximal_adagrad
_OPTIMIZER_CLS_NAMES = {
'Adagrad': adagrad.AdagradOptimizer,
'Adam': adam.AdamOptimizer,
'Ftrl': ftrl.FtrlOptimizer,
'RMSProp': rmsprop.RMSPropOptimizer,
'SGD': gradient_descent.GradientDescentOptimizer,
'ProximalAdagrad': proximal_adagrad.ProximalAdagradOptimizer
}
except ImportError:
print("Warning: could not import Tensorflow")
_OPTIMIZER_CLS_NAMES = {}
default_db_uri = ('mysql+pymysql://query:<EMAIL>@logstash.<EMAIL>/'
'subunit2sql')
def fixed_lenght_example(result, normalized_length=5500,
aggregation_functions=None):
"""Normalize one example.
Normalize one example of data to a fixed length (L).
The input is s x d.
To achieve fixed lenght:
- if aggregation functions are provided, apply them, or else
- if s > L, cut each dstat column data to L
- if s < L, pad with zeros to reach L
The output is a pd.DataFrame with shape (L, d)
"""
# Fix length of dataset
example = result['dstat']
init_len = len(example)
dstat_keys = example.keys()
if aggregation_functions:
# Run all aggregation functions on each DataFrame column in the example
agg_dict = {column: [x(example[column]) for x in aggregation_functions]
for column
in example.columns}
        example = pd.DataFrame.from_dict(agg_dict)
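# Hedged sketch of the cut/pad branch described in the docstring above; this helper is an assumption
# for illustration and not the original implementation.
def _pad_or_cut(example, normalized_length):
    if len(example) >= normalized_length:
        return example.iloc[:normalized_length]
    return example.reindex(range(normalized_length), fill_value=0)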
"""
Pandas(2)
"""
## 5. Data Aggregation
import pandas as pd
from numpy.random import seed, rand, randint
import numpy as np
# NumPy random functions
seed(42)
df = pd.DataFrame({
'Weather': ['cold', 'hot', 'cold', 'hot', 'cold', 'hot', 'cold'],
'Food': ['soup', 'soup', 'icecream', 'chocolate', 'icecream', 'icecream', 'soup'],
'Price': 10 * rand(7),
'Number': randint(1, 9)
})
print(df)
# group by the Weather column (2 groups)
weather_group = df.groupby('Weather')
i = 0
for name, group in weather_group:
i = i + 1
print("Group", i, name)
print(group)
# first entry, last entry, and mean of each group
print("Weather group first\n", weather_group.first())
print("Weather group last\n", weather_group.last())
print("Weather group mean\n", weather_group.mean())
# group by multiple columns and create new groups
wf_group = df.groupby(['Weather', 'Food'])
print("WF Groups", wf_group.groups)
# apply NumPy functions as a list
print("WF Aggregated\n", wf_group.agg([np.mean, np.median]))
## 6. Concatenating and appending DataFrames
print("df :3\n", df[:3]) # first three rows
print("Concat Back together\n", pd.concat([df[:3], df[3:]]))
print("Appending rows\n", df[:3].append(df[5:]))
## 7. Joining DataFrames - pandas.merge(), DataFrame.join()
path = 'D:\\1.Workspace\\1.Python\\part2.data_analysis\\pythonDataAnalysis2nd\\'
# taxi company employee numbers and destination data
dests = pd.read_csv(path + 'ch3-2.dest.csv')
print("Dests\n", dests)
# taxi driver tip data
tips = pd.read_csv(path +'ch3-2.tips.csv')
print("Tips\n", tips)
print("Merge() on key\n", pd.merge(dests, tips, on='EmpNr'))
print("Dests join() tips\n", dests.join(tips, lsuffix='Dest', rsuffix='Tips'))
print("Inner join with merge()\n", pd.merge(dests, tips, how='inner'))
print("Outer join with merge()\n", pd.merge(dests, tips, how='outer'))
## 8. Handling missing values
path = 'D:\\1.Workspace\\1.Python\\part2.data_analysis\\pythonDataAnalysis2nd\\'
df = pd.read_csv(path + 'ch3-1.WHO_first9cols.csv')
# the Country and Net primary school enrolment ratio male(%) columns, first two rows
df = df[['Country', df.columns[-2]]][:2]
print("New df\n", df)
print("Null Values\n", pd.isnull(df))
print("Total Null Values\n", pd.isnull(df).sum()) # Nan 값의 총 갯수(True = 1)
print("Not Null Values\n", df.notnull())
print("Last Column Doubled\n", 2 * df[df.columns[-1]]) # Nan 값을 곱하거나 더해도 Nan
print("Last Column plus NaN\n", df[df.columns[-1]] + np.nan)
print("Zero filled\n", df.fillna(0))
## 9. Dealing with dates
print("Date range", pd.date_range('1/1/1900', periods=42, freq='D'))
import sys
try:
print("Date range", pd.date_range('1/1/1677', periods=4, freq='D'))
except:
etype, value, _ = sys.exc_info()
print("Error encountered", etype, value)
# date range allowed by pd.DateOffset
offset = pd.DateOffset(seconds=2 ** 33/10 ** 9)
mid = pd.to_datetime('1/1/1970')
print("Start valid range", mid - offset)
print("End valid range", mid + offset)
# convert strings to dates
print("With format", pd.to_datetime(['19021112', '19031230'], format='%Y%m%d'))
# non-date strings (the second element) are not converted
print("Illegal date", pd.to_datetime(['1902-11-12', 'not a date']))
print("Illegal date coerced", | pd.to_datetime(['1902-11-12', 'not a date'], errors='coerce') | pandas.to_datetime |
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
data_frame_train = pd.read_csv('../input/train.csv')
data_frame_test = pd.read_csv("../input/test.csv")
print(data_frame_train.shape)
print(data_frame_train.head(5))
print(data_frame_train.dtypes)
print(data_frame_train.describe())
class_count = data_frame_train.groupby('type').size()
print(class_count)
sns.set()
sns.pairplot(data_frame_train,hue="type")
print(data_frame_test.columns)
df = data_frame_train["type"]
indexes_test = data_frame_test["id"]
data_frame_train = data_frame_train.drop(["type","color","id"],axis=1)
data_frame_test = data_frame_test.drop(["color","id"],axis=1)
data_frame_train = pd.get_dummies(data_frame_train)
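# Hedged continuation sketch (assumption): the RandomForestClassifier / classification_report imports
# above suggest a training step roughly like this; hyper-parameters are illustrative only.
def _demo_fit(features, labels):
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(features, labels)
    print(classification_report(labels, clf.predict(features)))
    return clf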
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
with pytest.raises(TypeError):
            td // np.datetime64("2016-01-01", "us")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
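        # For example (sketch): for `5 // td`, int.__floordiv__ returns
        # NotImplemented and td.__rfloordiv__(5) also returns NotImplemented,
        # so Python raises TypeError.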
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
with pytest.raises(TypeError):
td.__rfloordiv__(dt64)
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
with pytest.raises(TypeError):
td.__rfloordiv__(np.float64(2.0))
with pytest.raises(TypeError):
td.__rfloordiv__(np.uint8(9))
with pytest.raises(TypeError, match="Invalid dtype"):
# deprecated GH#19761, enforced GH#29797
td.__rfloordiv__(np.int32(2.0))
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
with pytest.raises(TypeError, match="Invalid dtype"):
# Deprecated GH#19761, enforced GH#29797
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
with pytest.raises(TypeError):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = Timedelta(minutes=3)
result = timedelta(minutes=4) % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=1)
def test_rmod_timedelta64(self):
# GH#19365
td = Timedelta(minutes=3)
result = np.timedelta64(5, "m") % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=2)
def test_rmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
with pytest.raises(TypeError):
Timestamp("2018-01-22") % td
with pytest.raises(TypeError):
15 % td
with pytest.raises(TypeError):
16.0 % td
with pytest.raises(TypeError):
np.array([22, 24]) % td
# ----------------------------------------------------------------
# Timedelta.__divmod__, __rdivmod__
def test_divmod_numeric(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, 53 * 3600 * 1e9)
assert result[0] == Timedelta(1, unit="ns")
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=1)
assert result
result = divmod(td, np.nan)
assert result[0] is NaT
assert result[1] is NaT
def test_divmod(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
result = divmod(td, 54)
assert result[0] == Timedelta(hours=1)
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(0)
result = divmod(td, NaT)
assert np.isnan(result[0])
assert result[1] is NaT
def test_divmod_offset(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, offsets.Hour(-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_divmod_invalid(self):
# GH#19365
td = Timedelta(days=2, hours=6)
with pytest.raises(TypeError):
divmod(td, Timestamp("2018-01-22"))
def test_rdivmod_pytimedelta(self):
# GH#19365
result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
def test_rdivmod_offset(self):
result = divmod(offsets.Hour(54), Timedelta(hours=-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_rdivmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
with pytest.raises(TypeError):
divmod(Timestamp("2018-01-22"), td)
with pytest.raises(TypeError):
divmod(15, td)
with pytest.raises(TypeError):
divmod(16.0, td)
with pytest.raises(TypeError):
divmod(np.array([22, 24]), td)
# ----------------------------------------------------------------
@pytest.mark.parametrize(
"op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub]
)
@pytest.mark.parametrize(
"arr",
[
np.array([Timestamp("20130101 9:01"), | Timestamp("20121230 9:02") | pandas.Timestamp |
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
        assert isnull(float('nan'))
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), | tm.makePeriodFrame() | pandas.util.testing.makePeriodFrame |
"""This file contains utility functions used for numpy data manipulation"""
import json
import logging
try:
import dicom
except:
import pydicom as dicom
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
import os
import SimpleITK as sitk
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
class NumpyEncoder(json.JSONEncoder):
"""This is a Encoder used to dump numpy arrays to json files.
It also converts np.int64 (not python serializable) to python int
Example:
a = np.array([1, 2, 3])
print(json.dumps({'aa': [2, (2, 3, 4), a], 'bb': [2]}, cls=NumpyEncoder))
Output:
{"aa": [2, [2, 3, 4], [1, 2, 3]], "bb": [2]}
"""
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.float32) or isinstance(obj, np.float16):
return float(obj)
return json.JSONEncoder.default(self, obj)
def convert_to_unit8(pixel_array, from_bit=16, to_bit=8):
"""
    Convert a dicom pixel array to a uint8 numpy array
Args:
pixel_array: a numpy array
from_bit: bit to convert from
to_bit: bit to convert to
Returns:
pixel_array: a converted pixel_array
"""
if from_bit == to_bit:
return pixel_array
# TODO: this is not exactly right. As 0-255 only has 2**8-1 scales
pixel_array = pixel_array * (2 ** (to_bit - from_bit))
if to_bit == 8:
pixel_array = pixel_array.astype(np.uint8)
else:
raise ValueError('Unsupported bit type {}-bit!'.format(to_bit))
return pixel_array
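# Hedged usage sketch for convert_to_unit8 (not part of the original module):
# a full-scale 16-bit value maps to the top of the 8-bit range, e.g.
#   convert_to_unit8(np.array([0, 256, 65535], dtype=np.uint16))
#   -> array([  0,   1, 255], dtype=uint8)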
def get_new_dimensions(orig_shapes, min_dimension, max_dimension):
"""Get new dimensions based on the target shape limits
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
orig_shapes:
min_dimension:
max_dimension:
Returns:
new_shapes: a tuple of new dimensions
"""
min_target = min(orig_shapes)
max_target = max(orig_shapes)
if max_target * min_dimension / min_target < max_dimension:
ratio = min_dimension / min_target
else:
ratio = max_dimension / max_target
new_shapes = tuple(int(shape * ratio) for shape in orig_shapes)
return new_shapes
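# Hedged examples for get_new_dimensions (added for clarity; values computed by hand):
#   get_new_dimensions((800, 400), min_dimension=600, max_dimension=1400) -> (1200, 600)
#     (case 1: scaling the short side to 600 keeps the long side under 1400)
#   get_new_dimensions((800, 400), min_dimension=600, max_dimension=1000) -> (1000, 500)
#     (case 2: that scaling would exceed 1000, so the long side is pinned to 1000 instead)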
def get_pixel_array_from_dicom_path(filepath, mismatch=1, to_bit=8, floor=None, ceiling=None):
"""
    Read the image from a dicom file and convert it to a numpy array.
Args:
filepath: dicom filepath
mismatch: number of pixels to drop in pixel_array in case of a shape mismatch
to_bit: bit to convert to, 8 and 16 supported. Return raw array if set to -1.
floor: manually override bit conversion
ceiling: manually override bit conversion
Returns:
pixel_array: a numpy array containing the image stored in dicom file
"""
# read dicom files
ds = dicom.read_file(filepath)
# Get image numpy array
# Image dicom file is in 16 bit and needs to be converted
try:
try:
pixel_array = ds.pixel_array
except:
# pydicom cannot handle lossless jpeg
reader = sitk.ImageSeriesReader()
reader.SetFileNames([filepath])
sitk_img = reader.Execute()
pixel_array = sitk.GetArrayFromImage(sitk_img)[0, ...]
try:
if ds.PresentationLUTShape == 'INVERSE':
pixel_array = pixel_array.max() - pixel_array
except:
logging.debug('PresentationLUTShape is INVERSE!')
if to_bit == -1:
# return the raw image
return pixel_array
if floor is not None and ceiling is not None:
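            # Window the (already decoded) pixel values to [floor, ceiling],
            # then rescale linearly into the target bit depth.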
            pixel_array = np.clip(pixel_array, a_min=floor, a_max=ceiling)
pixel_array = (pixel_array.astype(float) - floor) / (ceiling - floor) * (2 ** to_bit - 1)
if to_bit == 8:
pixel_array = pixel_array.astype(np.uint8)
elif to_bit == 16:
pixel_array = pixel_array.astype(np.uint16)
else:
raise ValueError('Unsupported bit type {}-bit!'.format(to_bit))
elif ds.BitsStored != to_bit:
print('Converting from {}-bit to {}-bit'.format(ds.BitsStored, to_bit))
pixel_array = convert_to_unit8(pixel_array, to_bit=to_bit)
except:
# Some mask has size mismatch of exactly one, then manually discard one element
try:
# all masks are stored in uint8 format
pixel_array = np.fromstring(ds.PixelData, dtype=np.uint8)
pixel_array = pixel_array[mismatch:].reshape((ds.Rows, ds.Columns))
except:
raise ValueError('The img size mismatches in {} and is not {}'.format(filepath, mismatch))
return pixel_array
def gen_single_input(single_image_path):
"""Read from image path and return a 3 channel color image in the format of numpy array
Args:
single_image_path:
Returns:
img_color: a 3 channel numpy array
"""
filepath = single_image_path
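    # matplotlib reads PNGs as float arrays scaled to [0, 1] (assumed here),
    # so multiply by 255 to recover roughly 8-bit intensities.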
img = plt.imread(filepath) * 255
img = img.astype(np.float32)
# shenzhen dataset has 3 channels
if len(img.shape) == 3:
img_color = img
# some nih png file has four channels RGBA
# e.g., '/data/dataset/images/images_003/00006074_000.png'
# use first 3 channels RGB only
if img.shape[-1] == 4:
img_color = img[:, :, :3]
# most nih dataset has single grayscale channel
elif len(img.shape) == 2:
img_color = np.dstack([img] * 3)
return img_color
def input_generator(filepath_list=[], dirname=None):
"""
    Yield image numpy arrays and their corresponding filepaths, one pair at a time
Args:
filepath_list:
dirname:
Yields:
img_color:
filepath
"""
if not filepath_list and dirname:
print('******* dirname specified!')
filepath_list = [os.path.join(dirname, filename)
for filename in os.listdir(dirname) if filename.endswith('.png')]
for filepath in filepath_list:
img_color = gen_single_input(filepath)
img_color = np.reshape(img_color, [-1])
print('************* Input image array:')
print([pix for pix in img_color[:100]])
yield img_color, filepath
def diff_df(df1, df2):
"""Identify differences between two pandas DataFrames"""
assert (df1.columns == df2.columns).all(), "DataFrame column names are different"
if any(df1.dtypes != df2.dtypes):
"Data Types are different, trying to convert"
df2 = df2.astype(df1.dtypes)
if df1.equals(df2):
return None
else:
# need to account for np.nan != np.nan returning True
diff_mask = (df1 != df2) & ~(df1.isnull() & df2.isnull())
ne_stacked = diff_mask.stack()
changed = ne_stacked[ne_stacked]
changed.index.names = ['id', 'col']
difference_locations = np.where(diff_mask)
changed_from = df1.values[difference_locations]
changed_to = df2.values[difference_locations]
return pd.DataFrame({'from': changed_from, 'to': changed_to},
index=changed.index)
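# Hedged usage sketch for diff_df (not part of the original module): for two
# equally-shaped frames it returns a long-format frame indexed by (id, col),
# e.g. comparing pd.DataFrame({'a': [1, 2]}) with pd.DataFrame({'a': [1, 3]})
# yields a single row with from=2 and to=3.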
def concat_df(input_csv_path_list, output_csv_path=None):
"""Concatenate csv files and return the combined dataframe
Args:
input_csv_path_list:
output_csv_path:
Returns:
"""
df_all = None
for csv_path in input_csv_path_list:
df = pd.read_csv(csv_path)
print('{}: length {}'.format(csv_path, len(df)))
try:
df_all = | pd.concat([df_all, df]) | pandas.concat |
# Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File called _pytest for PyCharm compatibility
import pandas as pd
import pytest
from eland.dataframe import DEFAULT_NUM_ROWS_DISPLAYED
from eland.tests.common import TestData, assert_pandas_eland_series_equal
class TestDataFrameRepr(TestData):
@classmethod
def setup_class(cls):
# conftest.py changes this default - restore to original setting
pd.set_option("display.max_rows", 60)
"""
to_string
"""
def test_simple_lat_lon(self):
"""
        Note on nested object order - this can change when the document is
        stored in Elasticsearch; note this could be a bug in ES...
PUT my_index/doc/1
{
"location": {
"lat": "50.033333",
"lon": "8.570556"
}
}
GET my_index/_search
"_source": {
"location": {
"lat": "50.033333",
"lon": "8.570556"
}
}
GET my_index/_search
{
"_source": "location"
}
"_source": {
"location": {
"lon": "8.570556",
"lat": "50.033333"
}
}
Hence we store the pandas df source json as 'lon', 'lat'
"""
pd_dest_location = self.pd_flights()["DestLocation"].head(1)
ed_dest_location = self.ed_flights()["DestLocation"].head(1)
assert_pandas_eland_series_equal(pd_dest_location, ed_dest_location)
def test_num_rows_to_string(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_to_string(DEFAULT_NUM_ROWS_DISPLAYED - 1)
self.num_rows_to_string(DEFAULT_NUM_ROWS_DISPLAYED)
with pytest.warns(UserWarning):
# UserWarning displayed by eland here (compare to pandas with max_rows set)
self.num_rows_to_string(
DEFAULT_NUM_ROWS_DISPLAYED + 1, None, DEFAULT_NUM_ROWS_DISPLAYED
)
# Test for where max_rows lt or gt num_rows
self.num_rows_to_string(10, 5, 5)
self.num_rows_to_string(100, 200, 200)
def num_rows_to_string(self, rows, max_rows_eland=None, max_rows_pandas=None):
ed_flights = self.ed_flights()[["DestLocation", "OriginLocation"]]
pd_flights = self.pd_flights()[["DestLocation", "OriginLocation"]]
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = ed_head.to_string(max_rows=max_rows_eland)
pd_head_str = pd_head.to_string(max_rows=max_rows_pandas)
# print("\n", ed_head_str)
# print("\n", pd_head_str)
assert pd_head_str == ed_head_str
def test_empty_dataframe_string(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_s = ed_ecom[ed_ecom["currency"] == "USD"].to_string()
pd_ecom_s = pd_ecom[pd_ecom["currency"] == "USD"].to_string()
assert ed_ecom_s == pd_ecom_s
"""
repr
"""
def test_num_rows_repr(self):
self.num_rows_repr(
pd.get_option("display.max_rows") - 1, pd.get_option("display.max_rows") - 1
)
self.num_rows_repr(
pd.get_option("display.max_rows"), pd.get_option("display.max_rows")
)
self.num_rows_repr(
pd.get_option("display.max_rows") + 1, pd.get_option("display.min_rows")
)
def num_rows_repr(self, rows, num_rows_printed):
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = repr(ed_head)
pd_head_str = repr(pd_head)
if num_rows_printed < rows:
# add 1 for ellipsis
num_rows_printed = num_rows_printed + 1
# number of rows is num_rows_printed + 3 (header, summary)
assert (num_rows_printed + 3) == len(ed_head_str.splitlines())
assert pd_head_str == ed_head_str
def test_empty_dataframe_repr(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_r = repr(ed_ecom[ed_ecom["currency"] == "USD"])
pd_ecom_r = repr(pd_ecom[pd_ecom["currency"] == "USD"])
assert ed_ecom_r == pd_ecom_r
"""
to_html
"""
def test_num_rows_to_html(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_to_html(DEFAULT_NUM_ROWS_DISPLAYED - 1)
self.num_rows_to_html(DEFAULT_NUM_ROWS_DISPLAYED)
with pytest.warns(UserWarning):
# UserWarning displayed by eland here
self.num_rows_to_html(
DEFAULT_NUM_ROWS_DISPLAYED + 1, None, DEFAULT_NUM_ROWS_DISPLAYED
)
# Test for where max_rows lt or gt num_rows
self.num_rows_to_html(10, 5, 5)
self.num_rows_to_html(100, 200, 200)
def num_rows_to_html(self, rows, max_rows_eland=None, max_rows_pandas=None):
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = ed_head.to_html(max_rows=max_rows_eland)
pd_head_str = pd_head.to_html(max_rows=max_rows_pandas)
# print(ed_head_str)
# print(pd_head_str)
assert pd_head_str == ed_head_str
def test_empty_dataframe_to_html(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_h = ed_ecom[ed_ecom["currency"] == "USD"].to_html()
pd_ecom_h = pd_ecom[pd_ecom["currency"] == "USD"].to_html()
assert ed_ecom_h == pd_ecom_h
"""
_repr_html_
"""
def test_num_rows_repr_html(self):
# check setup works
assert | pd.get_option("display.max_rows") | pandas.get_option |
import argparse
import requests
## for working with data in lots of formats
## python3 -m pip install pandas
import pandas
ITEMURL = "http://pokeapi.co/api/v2/item/"
def main():
# Make HTTP GET request using requests
# and decode JSON attachment as pythonic data structure
# Also, append the URL ITEMURL with a parameter to return 1000
# items in one response
items = requests.get(f"{ITEMURL}?limit=1000")
items = items.json()
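    # The list endpoint is assumed to return a payload shaped like
    # {"count": ..., "next": ..., "previous": ..., "results": [{"name": ..., "url": ...}, ...]},
    # so the item names live under the "results" key.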
# create a list to store items with the word searched on
matchedwords = []
# Loop through data, and print pokemon names
# item.get("results") will return the list
# mapped to the key "results"
for item in items.get("results"):
# check to see if the current item's VALUE mapped to item["name"]
# contains the search word
if args.searchword in item.get("name"):
# if TRUE, add that item to the end of list matchedwords
matchedwords.append(item.get("name"))
finishedlist = matchedwords.copy()
## map our matchedword list to a dict with a title
matchedwords = {}
matchedwords["matched"] = finishedlist
## list all words containing matched word
print(f"There are {len(finishedlist)} words that contain the word '{args.searchword}' in the Pokemon Item API!")
print(f"List of Pokemon items containing '{args.searchword}': ")
print(matchedwords)
## export to excel with pandas
# make a dataframe from our data
itemsdf = | pandas.DataFrame(matchedwords) | pandas.DataFrame |