prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
import torch
import pandas as pd
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import os
from utils import set_seed, parse_training_args
from dataset import ToxicDataset, PairedToxicDataset
from trainer import PairedTrainer
import wandb
if __name__ == "__main__":
args = parse_training_args()
config = vars(args)
if config["use_extra_data"]:
extra_files = [
os.path.join(config["extra_data_dir"], f)
for f in os.listdir(config["extra_data_dir"])
if f.endswith(".csv")
]
config["extra_files"] = extra_files
wandb.login()
fold = args.fold if args.fold is not None else 0
with wandb.init(
project="jigsaw-paired-train",
group=str(args.group_id),
name=f"{args.group_id}-{args.checkpoint}-fold-{fold}",
config=config,
):
config = wandb.config
set_seed(config.seed)
data = pd.read_csv(config.train_path)
if args.fold is not None:
print(f"Using fold {fold} as validation.")
train_data = data.loc[data.fold != fold].reset_index(drop=True)
valid_data = data.loc[data.fold == fold].reset_index(drop=True)
else:
train_data = pd.read_csv(config.train_path)
valid_data = pd.read_csv(config.valid_path)
if config.use_extra_data:
print("adding extra data:")
for file in extra_files:
print(file)
extra_data = pd.concat([ | pd.read_csv(f) | pandas.read_csv |
# coding: utf-8
"""
Summary
-------
Spatial interpolation functions for random forest interpolation using the scikit-learn package.
"""
# import
import sys
import statistics
import Eval as Eval
import make_blocks as mbk
import cluster_3d as c3d
import get_data as GD
from sklearn import metrics
from sklearn.model_selection import ShuffleSplit
from sklearn.ensemble import RandomForestRegressor
import geopandas as gpd
import pandas as pd
import numpy as np
import pyproj
import matplotlib.pyplot as plt
import warnings
# Runtime warning suppress, this suppresses the /0 warning
warnings.filterwarnings("ignore")
def random_forest_interpolator(latlon_dict, Cvar_dict, input_date, var_name, shapefile, show,
                               file_path_elev, idx_list, expand_area, res=10000):
'''Random forest interpolation
Parameters
----------
latlon_dict : dictionary
the latitude and longitudes of the stations
Cvar_dict : dictionary
dictionary of weather variable values for each station
input_date : string
the date you want to interpolate for
var_name : string
the name of the variable you are interpolating
shapefile : string
path to the study area shapefile, including its name
show : bool
whether you want to plot a map
file_path_elev : string
path to the elevation lookup file
idx_list : int
position of the elevation column in the lookup file
expand_area : bool
        whether to expand the study area (by 200 km) so that more stations are taken into account
Returns
----------
ndarray
- the array of values for the interpolated surface
list
- the bounds of the array surface, for use in other functions
'''
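    # Illustrative call (hypothetical paths and dictionaries, shown only to clarify the argument order):
    # rf_surface, bounds = random_forest_interpolator(latlon_dict, temp_dict, '2018-07-01', 'Temp',
    #                                                 'study_area.shp', False, 'elev_lookup.csv',
    #                                                 idx_list, False, res=10000)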
lat = []
lon = []
Cvar = []
na_map = gpd.read_file(shapefile)
bounds = na_map.bounds
if expand_area:
xmax = bounds['maxx']+200000
xmin = bounds['minx']-200000
ymax = bounds['maxy']+200000
ymin = bounds['miny']-200000
else:
xmax = bounds['maxx']
xmin = bounds['minx']
ymax = bounds['maxy']
ymin = bounds['miny']
for station_name in Cvar_dict.keys():
if station_name in latlon_dict.keys():
loc = latlon_dict[station_name]
latitude = loc[0]
longitude = loc[1]
# Filter out stations outside of grid
proj_coord = pyproj.Proj('esri:102001')(longitude, latitude)
if (proj_coord[1] <= float(ymax[0]) and proj_coord[1] >= float(ymin[0]) and proj_coord[0] <= float(xmax[0]) and proj_coord[0] >= float(xmin[0])):
cvar_val = Cvar_dict[station_name]
lat.append(float(latitude))
lon.append(float(longitude))
Cvar.append(cvar_val)
y = np.array(lat)
x = np.array(lon)
z = np.array(Cvar)
pixelHeight = res
pixelWidth = res
num_col = int((xmax - xmin) / pixelHeight)
num_row = int((ymax - ymin) / pixelWidth)
# We need to project to a projected system before making distance matrix
source_proj = pyproj.Proj(proj='latlong', datum='NAD83')
xProj, yProj = pyproj.Proj('esri:102001')(x, y)
df_trainX = pd.DataFrame({'xProj': xProj, 'yProj': yProj, 'var': z})
if expand_area:
yProj_extent = np.append(
yProj, [bounds['maxy']+200000, bounds['miny']-200000])
xProj_extent = np.append(
xProj, [bounds['maxx']+200000, bounds['minx']-200000])
else:
yProj_extent = np.append(yProj, [bounds['maxy'], bounds['miny']])
xProj_extent = np.append(xProj, [bounds['maxx'], bounds['minx']])
Yi = np.linspace(np.min(yProj_extent), np.max(yProj_extent), num_row+1)
Xi = np.linspace(np.min(xProj_extent), np.max(xProj_extent), num_col+1)
Xi, Yi = np.meshgrid(Xi, Yi)
Xi, Yi = Xi.flatten(), Yi.flatten()
maxmin = [np.min(yProj_extent), np.max(yProj_extent),
np.max(xProj_extent), np.min(xProj_extent)]
# Elevation
# Preparing the coordinates to send to the function that will get the elevation grid
concat = np.array((Xi.flatten(), Yi.flatten())).T
send_to_list = concat.tolist()
# The elevation function takes a tuple
send_to_tuple = [tuple(x) for x in send_to_list]
Xi1_grd = []
Yi1_grd = []
elev_grd = []
# Get the elevations from the lookup file
elev_grd_dict = GD.finding_data_frm_lookup(
send_to_tuple, file_path_elev, idx_list)
for keys in elev_grd_dict.keys(): # The keys are each lat lon pair
x = keys[0]
y = keys[1]
Xi1_grd.append(x)
Yi1_grd.append(y)
# Append the elevation data to the empty list
elev_grd.append(elev_grd_dict[keys])
elev_array = np.array(elev_grd) # make an elevation array
elev_dict = GD.finding_data_frm_lookup(zip(
xProj, yProj), file_path_elev, idx_list) # Get the elevations for the stations
xProj_input = []
yProj_input = []
e_input = []
for keys in zip(xProj, yProj): # Repeat process for just the stations not the whole grid
x = keys[0]
y = keys[1]
xProj_input.append(x)
yProj_input.append(y)
e_input.append(elev_dict[keys])
source_elev = np.array(e_input)
Xi1_grd = np.array(Xi1_grd)
Yi1_grd = np.array(Yi1_grd)
df_trainX = pd.DataFrame(
{'xProj': xProj, 'yProj': yProj, 'elevS': source_elev, 'var': z})
df_testX = pd.DataFrame(
{'Xi': Xi1_grd, 'Yi': Yi1_grd, 'elev': elev_array})
reg = RandomForestRegressor(
n_estimators=100, max_features='sqrt', random_state=1)
y = np.array(df_trainX['var']).reshape(-1, 1)
X_train = np.array(df_trainX[['xProj', 'yProj', 'elevS']])
X_test = np.array(df_testX[['Xi', 'Yi', 'elev']])
reg.fit(X_train, y)
Zi = reg.predict(X_test)
rf_grid = Zi.reshape(num_row+1, num_col+1)
if show:
fig, ax = plt.subplots(figsize=(15, 15))
crs = {'init': 'esri:102001'}
na_map = gpd.read_file(shapefile)
plt.imshow(rf_grid, extent=(xProj_extent.min(
)-1, xProj_extent.max()+1, yProj_extent.max()-1, yProj_extent.min()+1))
        na_map.plot(ax=ax, color='white', edgecolor='k', linewidth=2, zorder=10, alpha=0.1)
plt.scatter(xProj, yProj, c=z, edgecolors='k')
plt.gca().invert_yaxis()
cbar = plt.colorbar()
cbar.set_label(var_name)
title = 'RF Interpolation for %s on %s' % (var_name, input_date)
fig.suptitle(title, fontsize=14)
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
return rf_grid, maxmin
def cross_validate_rf(latlon_dict, Cvar_dict, shapefile, file_path_elev, elev_array, idx_list, pass_to_plot):
'''Leave-one-out cross-validation procedure for RF
Parameters
----------
latlon_dict : dictionary
the latitude and longitudes of the stations
Cvar_dict : dictionary
dictionary of weather variable values for each station
shapefile : string
path to the study area shapefile, including its name
file_path_elev : string
path to the elevation lookup file
elev_array : ndarray
        array for elevation, created using IDEW interpolation (this is a trick to speed up the code)
idx_list : int
position of the elevation column in the lookup file
pass_to_plot : bool
        whether the error will be plotted, in which case a version of the error without the absolute value is also needed (e.g. for fire season days)
Returns
----------
dictionary
- a dictionary of the absolute error at each station when it was left out
dictionary
- if pass_to_plot = True, returns a dictionary without the absolute value of the error, for example for plotting fire season error
'''
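    # Procedure: each station is withheld in turn, the RF surface is re-fit on the remaining
    # stations, and the interpolated value at the withheld station's grid cell is compared
    # against its observed value to obtain the (absolute) error.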
x_origin_list = []
y_origin_list = []
absolute_error_dictionary = {} # for plotting
no_absolute_value_dict = {} # to see whether under or over estimation
station_name_list = []
projected_lat_lon = {}
for station_name in Cvar_dict.keys():
if station_name in latlon_dict.keys():
station_name_list.append(station_name)
loc = latlon_dict[station_name]
latitude = loc[0]
longitude = loc[1]
Plat, Plon = pyproj.Proj('esri:102001')(longitude, latitude)
Plat = float(Plat)
Plon = float(Plon)
projected_lat_lon[station_name] = [Plat, Plon]
for station_name_hold_back in station_name_list:
lat = []
lon = []
Cvar = []
for station_name in sorted(Cvar_dict.keys()):
if station_name in latlon_dict.keys():
if station_name != station_name_hold_back:
loc = latlon_dict[station_name]
latitude = loc[0]
longitude = loc[1]
cvar_val = Cvar_dict[station_name]
lat.append(float(latitude))
lon.append(float(longitude))
Cvar.append(cvar_val)
else:
pass
y = np.array(lat)
x = np.array(lon)
z = np.array(Cvar)
na_map = gpd.read_file(shapefile)
bounds = na_map.bounds
xmax = bounds['maxx']
xmin = bounds['minx']
ymax = bounds['maxy']
ymin = bounds['miny']
pixelHeight = 10000
pixelWidth = 10000
num_col = int((xmax - xmin) / pixelHeight)
num_row = int((ymax - ymin) / pixelWidth)
# We need to project to a projected system before making distance matrix
source_proj = pyproj.Proj(proj='latlong', datum='NAD83')
xProj, yProj = pyproj.Proj('esri:102001')(x, y)
df_trainX = pd.DataFrame({'xProj': xProj, 'yProj': yProj, 'var': z})
yProj_extent = np.append(yProj, [bounds['maxy'], bounds['miny']])
xProj_extent = np.append(xProj, [bounds['maxx'], bounds['minx']])
Yi = np.linspace(np.min(yProj_extent), np.max(yProj_extent), num_row)
Xi = np.linspace(np.min(xProj_extent), np.max(xProj_extent), num_col)
Xi, Yi = np.meshgrid(Xi, Yi)
Xi, Yi = Xi.flatten(), Yi.flatten()
maxmin = [np.min(yProj_extent), np.max(yProj_extent),
np.max(xProj_extent), np.min(xProj_extent)]
# Elevation
# Preparing the coordinates to send to the function that will get the elevation grid
concat = np.array((Xi.flatten(), Yi.flatten())).T
send_to_list = concat.tolist()
# The elevation function takes a tuple
send_to_tuple = [tuple(x) for x in send_to_list]
Xi1_grd = []
Yi1_grd = []
elev_grd = []
# Get the elevations from the lookup file
elev_grd_dict = GD.finding_data_frm_lookup(
send_to_tuple, file_path_elev, idx_list)
for keys in elev_grd_dict.keys(): # The keys are each lat lon pair
x = keys[0]
y = keys[1]
Xi1_grd.append(x)
Yi1_grd.append(y)
# Append the elevation data to the empty list
elev_grd.append(elev_grd_dict[keys])
elev_array = np.array(elev_grd) # make an elevation array
elev_dict = GD.finding_data_frm_lookup(zip(
xProj, yProj), file_path_elev, idx_list) # Get the elevations for the stations
xProj_input = []
yProj_input = []
e_input = []
for keys in zip(xProj, yProj): # Repeat process for just the stations not the whole grid
x = keys[0]
y = keys[1]
xProj_input.append(x)
yProj_input.append(y)
e_input.append(elev_dict[keys])
source_elev = np.array(e_input)
Xi1_grd = np.array(Xi1_grd)
Yi1_grd = np.array(Yi1_grd)
df_trainX = pd.DataFrame(
{'xProj': xProj, 'yProj': yProj, 'elevS': source_elev, 'var': z})
df_testX = pd.DataFrame(
{'Xi': Xi1_grd, 'Yi': Yi1_grd, 'elev': elev_array})
reg = RandomForestRegressor(
n_estimators=100, max_features='sqrt', random_state=1)
y = np.array(df_trainX['var']).reshape(-1, 1)
X_train = np.array(df_trainX[['xProj', 'yProj', 'elevS']])
X_test = np.array(df_testX[['Xi', 'Yi', 'elev']])
reg.fit(X_train, y)
Zi = reg.predict(X_test)
rf_grid = Zi.reshape(num_row, num_col)
# Calc the RMSE, MAE at the pixel loc
# Delete at a certain point
coord_pair = projected_lat_lon[station_name_hold_back]
x_orig = int(
(coord_pair[0] - float(bounds['minx']))/pixelHeight) # lon
y_orig = int((coord_pair[1] - float(bounds['miny']))/pixelWidth) # lat
x_origin_list.append(x_orig)
y_origin_list.append(y_orig)
interpolated_val = rf_grid[y_orig][x_orig]
original_val = Cvar_dict[station_name_hold_back]
absolute_error = abs(interpolated_val-original_val)
absolute_error_dictionary[station_name_hold_back] = absolute_error
no_absolute_value_dict[station_name_hold_back] = interpolated_val-original_val
if pass_to_plot:
return absolute_error_dictionary, no_absolute_value_dict
else:
return absolute_error_dictionary
def shuffle_split_rf(latlon_dict, Cvar_dict, shapefile, file_path_elev, elev_array, idx_list, rep, res=10000):
    '''Shuffle-split cross-validation with a 50/50 training/test split
Parameters
----------
    latlon_dict : dictionary
the latitude and longitudes of the daily/hourly stations
Cvar_dict : dictionary
dictionary of weather variable values for each station
shapefile : string
path to the study area shapefile
file_path_elev : string
path to the elevation lookup file
elev_array : ndarray
        array for elevation, created using IDEW interpolation (this is a trick to speed up the code)
idx_list : int
position of the elevation column in the lookup file
rep : int
number of replications
Returns
----------
float
- MAE estimate for entire surface (average of replications)
'''
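    # Procedure: for each of the `rep` replications the stations are re-split roughly 50/50,
    # the RF surface is fit on the training half, and the MAE over the withheld half is
    # recorded; the replication-level MAEs are then averaged into a single estimate.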
count = 1
error_dictionary = {}
while count <= rep:
x_origin_list = []
y_origin_list = []
absolute_error_dictionary = {} # for plotting
station_name_list = []
projected_lat_lon = {}
for station_name in Cvar_dict.keys():
if station_name in latlon_dict.keys():
station_name_list.append(station_name)
loc = latlon_dict[station_name]
latitude = loc[0]
longitude = loc[1]
Plat, Plon = pyproj.Proj('esri:102001')(longitude, latitude)
Plat = float(Plat)
Plon = float(Plon)
projected_lat_lon[station_name] = [Plat, Plon]
# Split the stations in two
# we can't just use Cvar_dict.keys() because some stations do not have valid lat/lon
stations_input = []
for station_code in Cvar_dict.keys():
if station_code in latlon_dict.keys():
stations_input.append(station_code)
# Split the stations in two
stations = np.array(stations_input)
# Won't be exactly 50/50 if uneven num stations
splits = ShuffleSplit(n_splits=1, train_size=.5)
for train_index, test_index in splits.split(stations):
train_stations = stations[train_index]
# print(train_stations)
test_stations = stations[test_index]
# print(test_stations)
# They can't overlap
for val in train_stations:
if val in test_stations:
print('Error, the train and test sets overlap!')
sys.exit()
lat = []
lon = []
Cvar = []
for station_name in sorted(Cvar_dict.keys()):
if station_name in latlon_dict.keys():
if station_name not in test_stations:
loc = latlon_dict[station_name]
latitude = loc[0]
longitude = loc[1]
cvar_val = Cvar_dict[station_name]
lat.append(float(latitude))
lon.append(float(longitude))
Cvar.append(cvar_val)
else:
pass
y = np.array(lat)
x = np.array(lon)
z = np.array(Cvar)
na_map = gpd.read_file(shapefile)
bounds = na_map.bounds
xmax = bounds['maxx']
xmin = bounds['minx']
ymax = bounds['maxy']
ymin = bounds['miny']
pixelHeight = res
pixelWidth = res
num_col = int((xmax - xmin) / pixelHeight)+1
num_row = int((ymax - ymin) / pixelWidth)+1
# We need to project to a projected system before making distance matrix
source_proj = pyproj.Proj(proj='latlong', datum='NAD83')
xProj, yProj = pyproj.Proj('esri:102001')(x, y)
df_trainX = pd.DataFrame({'xProj': xProj, 'yProj': yProj, 'var': z})
yProj_extent = np.append(yProj, [bounds['maxy'], bounds['miny']])
xProj_extent = np.append(xProj, [bounds['maxx'], bounds['minx']])
Yi = np.linspace(np.min(yProj_extent), np.max(yProj_extent), num_row)
Xi = np.linspace(np.min(xProj_extent), np.max(xProj_extent), num_col)
Xi, Yi = np.meshgrid(Xi, Yi)
Xi, Yi = Xi.flatten(), Yi.flatten()
maxmin = [np.min(yProj_extent), np.max(yProj_extent),
np.max(xProj_extent), np.min(xProj_extent)]
# Elevation
# Preparing the coordinates to send to the function that will get the elevation grid
concat = np.array((Xi.flatten(), Yi.flatten())).T
send_to_list = concat.tolist()
# The elevation function takes a tuple
send_to_tuple = [tuple(x) for x in send_to_list]
Xi1_grd = []
Yi1_grd = []
elev_grd = []
# Get the elevations from the lookup file
elev_grd_dict = GD.finding_data_frm_lookup(
send_to_tuple, file_path_elev, idx_list)
for keys in elev_grd_dict.keys(): # The keys are each lat lon pair
x = keys[0]
y = keys[1]
Xi1_grd.append(x)
Yi1_grd.append(y)
# Append the elevation data to the empty list
elev_grd.append(elev_grd_dict[keys])
elev_array = np.array(elev_grd) # make an elevation array
elev_dict = GD.finding_data_frm_lookup(zip(
xProj, yProj), file_path_elev, idx_list) # Get the elevations for the stations
xProj_input = []
yProj_input = []
e_input = []
for keys in zip(xProj, yProj): # Repeat process for just the stations not the whole grid
x = keys[0]
y = keys[1]
xProj_input.append(x)
yProj_input.append(y)
e_input.append(elev_dict[keys])
source_elev = np.array(e_input)
Xi1_grd = np.array(Xi1_grd)
Yi1_grd = np.array(Yi1_grd)
df_trainX = pd.DataFrame(
{'xProj': xProj, 'yProj': yProj, 'elevS': source_elev, 'var': z})
df_testX = pd.DataFrame(
{'Xi': Xi1_grd, 'Yi': Yi1_grd, 'elev': elev_array})
reg = RandomForestRegressor(
n_estimators=100, max_features='sqrt', random_state=1)
y = np.array(df_trainX['var']).reshape(-1, 1)
X_train = np.array(df_trainX[['xProj', 'yProj', 'elevS']])
X_test = np.array(df_testX[['Xi', 'Yi', 'elev']])
reg.fit(X_train, y)
Zi = reg.predict(X_test)
rf_grid = Zi.reshape(num_row, num_col)
# Calc the RMSE, MAE at the pixel loc
# Delete at a certain point
for statLoc in test_stations:
coord_pair = projected_lat_lon[statLoc]
x_orig = int(
(coord_pair[0] - float(bounds['minx']))/pixelHeight) # lon
y_orig = int(
(coord_pair[1] - float(bounds['miny']))/pixelWidth) # lat
x_origin_list.append(x_orig)
y_origin_list.append(y_orig)
try:
interpolated_val = rf_grid[y_orig][x_orig]
original_val = Cvar_dict[statLoc]
absolute_error = abs(interpolated_val-original_val)
absolute_error_dictionary[statLoc] = absolute_error
except IndexError:
pass
error_dictionary[count] = sum(absolute_error_dictionary.values(
))/len(absolute_error_dictionary.values()) # average of all the withheld stations
count += 1
overall_error = sum(error_dictionary.values())/rep
return overall_error
def spatial_kfold_rf(idw_example_grid, loc_dict, Cvar_dict, shapefile, file_path_elev, elev_array, idx_list,
                     block_num, blocking_type, return_error):
'''Spatially blocked k-fold cross-validation procedure for RF
Parameters
----------
idw_example_grid : ndarray
used for reference of study area grid size
loc_dict : dictionary
the latitude and longitudes of the daily/hourly stations
Cvar_dict : dictionary
dictionary of weather variable values for each station
shapefile : string
path to the study area shapefile
file_path_elev : string
path to the elevation lookup file
elev_array : ndarray
        array for elevation, created using IDEW interpolation (this is a trick to speed up the code)
idx_list : int
position of the elevation column in the lookup file
block_num : int
number of blocks/clusters
blocking_type : string
whether to use clusters or blocks
return_error : bool
whether or not to return the error dictionary
Returns
----------
float
- MAE estimate for entire surface
int
- Return the block number just so we can later write it into the file to keep track
dictionary
- if return_error = True, a dictionary of the absolute error at each fold when it was left out
'''
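    # Procedure: stations are grouped into spatial clusters or blocks; each group is withheld
    # in turn, the RF surface is fit on the remaining stations, and the error at the withheld
    # stations' grid cells is recorded.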
groups_complete = [] # If not using replacement, keep a record of what we have done
error_dictionary = {}
x_origin_list = []
y_origin_list = []
absolute_error_dictionary = {}
projected_lat_lon = {}
# Selecting blocknum
if blocking_type == 'cluster':
cluster = c3d.spatial_cluster(loc_dict, Cvar_dict, shapefile, block_num, file_path_elev, idx_list, False,False,False)
elif blocking_type == 'block':
# Get the numpy array that delineates the blocks
np_array_blocks = mbk.make_block(idw_example_grid, block_num)
cluster = mbk.sorting_stations(
np_array_blocks, shapefile, loc_dict, Cvar_dict) # Now get the dictionary
else:
print('That is not a valid blocking method')
sys.exit()
for group in cluster.values():
if group not in groups_complete:
station_list = [k for k, v in cluster.items() if v == group]
groups_complete.append(group)
for station_name in Cvar_dict.keys():
if station_name in loc_dict.keys():
loc = loc_dict[station_name]
latitude = loc[0]
longitude = loc[1]
Plat, Plon = pyproj.Proj('esri:102001')(longitude, latitude)
Plat = float(Plat)
Plon = float(Plon)
projected_lat_lon[station_name] = [Plat, Plon]
lat = []
lon = []
Cvar = []
for station_name in sorted(Cvar_dict.keys()):
if station_name in loc_dict.keys():
if station_name not in station_list:
loc = loc_dict[station_name]
latitude = loc[0]
longitude = loc[1]
cvar_val = Cvar_dict[station_name]
lat.append(float(latitude))
lon.append(float(longitude))
Cvar.append(cvar_val)
else:
pass
y = np.array(lat)
x = np.array(lon)
z = np.array(Cvar)
na_map = gpd.read_file(shapefile)
bounds = na_map.bounds
xmax = bounds['maxx']
xmin = bounds['minx']
ymax = bounds['maxy']
ymin = bounds['miny']
pixelHeight = 10000
pixelWidth = 10000
num_col = int((xmax - xmin) / pixelHeight)
num_row = int((ymax - ymin) / pixelWidth)
# We need to project to a projected system before making distance matrix
source_proj = pyproj.Proj(proj='latlong', datum='NAD83')
xProj, yProj = pyproj.Proj('esri:102001')(x, y)
df_trainX = pd.DataFrame({'xProj': xProj, 'yProj': yProj, 'var': z})
yProj_extent = np.append(yProj, [bounds['maxy'], bounds['miny']])
xProj_extent = np.append(xProj, [bounds['maxx'], bounds['minx']])
Yi = np.linspace(np.min(yProj_extent), np.max(yProj_extent), num_row)
Xi = np.linspace(np.min(xProj_extent), np.max(xProj_extent), num_col)
Xi, Yi = np.meshgrid(Xi, Yi)
Xi, Yi = Xi.flatten(), Yi.flatten()
maxmin = [np.min(yProj_extent), np.max(yProj_extent),
np.max(xProj_extent), np.min(xProj_extent)]
# Elevation
# Preparing the coordinates to send to the function that will get the elevation grid
concat = np.array((Xi.flatten(), Yi.flatten())).T
send_to_list = concat.tolist()
# The elevation function takes a tuple
send_to_tuple = [tuple(x) for x in send_to_list]
Xi1_grd = []
Yi1_grd = []
elev_grd = []
# Get the elevations from the lookup file
elev_grd_dict = GD.finding_data_frm_lookup(
send_to_tuple, file_path_elev, idx_list)
for keys in elev_grd_dict.keys(): # The keys are each lat lon pair
x = keys[0]
y = keys[1]
Xi1_grd.append(x)
Yi1_grd.append(y)
# Append the elevation data to the empty list
elev_grd.append(elev_grd_dict[keys])
elev_array = np.array(elev_grd) # make an elevation array
elev_dict = GD.finding_data_frm_lookup(zip(
xProj, yProj), file_path_elev, idx_list) # Get the elevations for the stations
xProj_input = []
yProj_input = []
e_input = []
for keys in zip(xProj, yProj): # Repeat process for just the stations not the whole grid
x = keys[0]
y = keys[1]
xProj_input.append(x)
yProj_input.append(y)
e_input.append(elev_dict[keys])
source_elev = np.array(e_input)
Xi1_grd = np.array(Xi1_grd)
Yi1_grd = np.array(Yi1_grd)
df_trainX = pd.DataFrame(
{'xProj': xProj, 'yProj': yProj, 'elevS': source_elev, 'var': z})
df_testX = | pd.DataFrame({'Xi': Xi1_grd, 'Yi': Yi1_grd, 'elev': elev_array}) | pandas.DataFrame |
#<NAME> (C) MIT
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
import locations as loc
sys.path.append(loc.utility_path)
import Utility
class PopulateGantt:
"""Instantiate this object to preload the csv file and add rows as required."""
def __init__(self, gantt_filename: str, gantt_direc: str = loc.gantt_directory) -> None:
self.directory = gantt_direc
self.csvfile: str = Utility.find(gantt_filename, gantt_direc)
        self.columns: tuple[str, ...] = ("Task", "Task Type", "Start Date", "End Date", "Completion Time")
self.template_dict = {
"Task" : "",
"Task Type" : "",
"Start Date" : np.datetime64,
"End Date" : np.datetime64,
"Completion Time" : np.timedelta64
}
pass
@staticmethod
def calc_time_deltas(start_dates, end_dates):
"""Accepts any iterable type or series"""
#TODO make vectorised dataframe if entry types are pd.Series
if((len(start_dates) > len(end_dates)) or (len(start_dates) < len(end_dates))):
raise ValueError(f"Lengths of {type(start_dates)} do not match")
time_deltas: list[np.timedelta64] = []
for index in range(0, len(start_dates)):
#Convert to np datatype in case coming in as string
time_deltas.append(np.datetime64(end_dates[index]) - np.datetime64(start_dates[index]) + 1)
return time_deltas
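    # e.g. (hypothetical dates) PopulateGantt.calc_time_deltas(["2023-01-02"], ["2023-01-05"])
    # returns [numpy.timedelta64(4,'D')]; the "+ 1" makes the day count inclusive.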
    def add_whole_row(self, dict_row_entry: "dict[str, str | np.datetime64]"):
        if any(x not in dict_row_entry for x in self.columns):
            raise ValueError("Row entry is missing one or more of the expected columns")
try:
opened_csv: pd.DataFrame = | pd.read_csv(self.csvfile) | pandas.read_csv |
"""This script trains and evaluates a predictive model for outcome-oriented predictive process monitoring on inter-run-optimized parameter configurations.
Usage:
experiments_test_interrun_stability_unstructured.py <dataset> <method> <classifier> <auc_weight> <inter-run-stability_weight>
Example:
experiments_test_interrun_stability_unstructured.py bpic2012_cancelled single_laststate xgboost 1 5
Author: <NAME> [<EMAIL>]
"""
import os
import sys
from sys import argv
import pickle
import csv
import re
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import StandardScaler
from DatasetManager import DatasetManager
import EncoderFactory
import BucketFactory
import ClassifierFactory
from sklearn.calibration import CalibratedClassifierCV
def extract_event_nr(s):
m = re.match(r'.*_(\d{1,2})$', s)
if m:
return int(m.group(1))
else:
return 1
def extract_case_id(s):
m = re.match(r'(.*)_\d{1,2}$', s)
if m:
return m.group(1)
else:
return s
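# Examples (hypothetical case ids): extract_event_nr("case_17_3") -> 3 and extract_case_id("case_17_3") -> "case_17";
# ids without a trailing "_<nr>" suffix fall back to event number 1 and the unchanged id.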
dataset_ref = argv[1]
method_name = argv[2]
cls_method = argv[3]
alpha = int(argv[4]) # 0 or 1
beta = int(argv[5]) # 1 or 5
PARAMS_DIR = "val_results_auc%s_rmspd%s" % (alpha, beta)
RESULTS_DIR = "results_stability_interrun"
bucket_method, cls_encoding = method_name.split("_")
bucket_encoding = ("last" if bucket_method == "state" else "agg")
dataset_ref_to_datasets = {
"bpic2011": ["bpic2011_f%s"%formula for formula in range(1,5)],
"bpic2015": ["bpic2015_%s_f2"%(municipality) for municipality in range(1,6)],
"insurance": ["insurance_activity", "insurance_followup"],
"sepsis_cases": ["sepsis_cases_1", "sepsis_cases_2", "sepsis_cases_4"]
}
encoding_dict = {
"laststate": ["static", "last"],
"agg": ["static", "agg"],
"index": ["static", "index"],
"combined": ["static", "last", "agg"]
}
datasets = [dataset_ref] if dataset_ref not in dataset_ref_to_datasets else dataset_ref_to_datasets[dataset_ref]
methods = encoding_dict[cls_encoding]
text_method_enc = "bong"
text_method = text_method_enc
text_enc = cls_encoding
train_ratio = 0.8
val_ratio = 0.2
random_state = 22
min_cases_for_training = 1
def calculate_stability(group):
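    # Mean absolute difference between predictions made for consecutive prefixes of the same case;
    # lower values indicate more temporally stable predictions.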
group["diff"] = abs(group["predicted"].shift(-1) - group["predicted"])
return(group["diff"].mean(skipna=True))
# create results directory
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
for dataset_name in datasets:
detailed_results = pd.DataFrame()
# read the data
dataset_manager = DatasetManager(dataset_name)
data = dataset_manager.read_dataset()
# load optimal params
optimal_params_filename = os.path.join(PARAMS_DIR, "optimal_params_%s_%s_%s.pickle" % (cls_method.replace("_calibrated", ""), dataset_name, method_name))
if not os.path.isfile(optimal_params_filename) or os.path.getsize(optimal_params_filename) <= 0:
continue
with open(optimal_params_filename, "rb") as fin:
args_all = pickle.load(fin)
# fit text models and transform for each event
text_transformer_args = args_all["text_transformer_args"]
# fit text models and transform for each event
if text_method in ["nb", "bong"]:
text_transformer_args["nr_selected"] = 500
if text_method == "nb":
text_transformer_args["pos_label"] = dataset_manager.pos_label
elif text_method in ["pv", "lda"]:
text_transformer_args["random_seed"] = 22
if dataset_name in ["github"]:
text_transformer_args["min_freq"] = 10
elif dataset_name in ["crm2"]:
text_transformer_args["min_freq"] = 20
cls_args = args_all["cls_args"]
cls_args['n_estimators'] = 500
# determine min and max (truncated) prefix lengths
min_prefix_length = 1
if "traffic_fines" in dataset_name:
max_prefix_length = 10
elif "bpic2017" in dataset_name:
max_prefix_length = min(20, dataset_manager.get_pos_case_length_quantile(data, 0.90))
else:
max_prefix_length = min(40, dataset_manager.get_pos_case_length_quantile(data, 0.90))
# split into training and test
train, test = dataset_manager.split_data_strict(data, train_ratio)
if "calibrate" in cls_method:
train, val = dataset_manager.split_val(train, val_ratio)
overall_class_ratio = dataset_manager.get_class_ratio(train)
print("Split data")
if method_name == "prefix_index":
nr_eventss = range(min_prefix_length, max_prefix_length+1)
else:
nr_eventss = [None]
for nr_events in nr_eventss:
text_transformer = EncoderFactory.get_encoder(text_method, text_transformer_args=text_transformer_args)
dt_train_text = text_transformer.fit_transform(train[dataset_manager.static_text_cols+dataset_manager.dynamic_text_cols],
train[dataset_manager.label_col])
static_text_cols = []
dynamic_text_cols = []
for col in dataset_manager.static_text_cols + dataset_manager.dynamic_text_cols:
dt_train_text = text_transformer.transform(train[[col]], train[dataset_manager.label_col])
current_text_cols = ["%s_%s" % (col, text_col) for text_col in dt_train_text.columns]
dt_train_text.columns = current_text_cols
            train_current = pd.concat([train.drop(col, axis=1), dt_train_text], axis=1, sort=False)
            train = train_current  # keep a reference so the remaining text columns can still be transformed
            del dt_train_text
dt_test_text = text_transformer.transform(test[[col]])
dt_test_text.columns = current_text_cols
            test_current = pd.concat([test.drop(col, axis=1), dt_test_text], axis=1, sort=False)
            test = test_current  # keep a reference for the remaining text columns
            del dt_test_text
if col in dataset_manager.static_text_cols:
static_text_cols.extend(current_text_cols)
else:
dynamic_text_cols.extend(current_text_cols)
if "calibrate" in cls_method:
dt_val_text = text_transformer.transform(val[[col]])
dt_val_text.columns = current_text_cols
                val_current = pd.concat([val.drop(col, axis=1), dt_val_text], axis=1, sort=False)
                val = val_current  # keep a reference for the remaining text columns
                del dt_val_text
print("Transformed text")
# generate prefixes
if "single" in method_name:
dt_train_bucket = dataset_manager.generate_prefix_data(train_current, min_prefix_length, max_prefix_length)
del train_current
dt_test_bucket = dataset_manager.generate_prefix_data(test_current, min_prefix_length, max_prefix_length)
del test_current
if "calibrate" in cls_method:
dt_val_bucket = dataset_manager.generate_prefix_data(val_current, min_prefix_length, max_prefix_length)
del val_current
else:
dt_train_bucket = dataset_manager.generate_prefix_data(train_current, nr_events, nr_events)
dt_test_bucket = dataset_manager.generate_prefix_data(test_current, nr_events, nr_events)
if "calibrate" in cls_method:
dt_val_bucket = dataset_manager.generate_prefix_data(val_current, nr_events, nr_events)
print("Generated prefixes")
# set up sequence encoders
encoders = []
for method in methods:
if cls_encoding == text_enc:
cls_encoder_args = {'case_id_col': dataset_manager.case_id_col,
'static_cat_cols': dataset_manager.static_cat_cols,
'static_num_cols': dataset_manager.static_num_cols + static_text_cols,
'dynamic_cat_cols': dataset_manager.dynamic_cat_cols,
'dynamic_num_cols': dataset_manager.dynamic_num_cols + dynamic_text_cols,
'fillna': True}
else:
cls_encoder_args = {'case_id_col': dataset_manager.case_id_col,
'static_cat_cols': dataset_manager.static_cat_cols,
'static_num_cols': dataset_manager.static_num_cols + static_text_cols,
'dynamic_cat_cols': dataset_manager.dynamic_cat_cols,
'dynamic_num_cols': dataset_manager.dynamic_num_cols,
'fillna': True}
encoders.append((method, EncoderFactory.get_encoder(method, **cls_encoder_args)))
if cls_encoding != text_enc and text_enc not in methods:
cls_encoder_args = {'case_id_col': dataset_manager.case_id_col,
'static_cat_cols': [],
'static_num_cols': [],
'dynamic_cat_cols': [],
'dynamic_num_cols': dynamic_text_cols,
'fillna': True}
encoders.append((text_enc, EncoderFactory.get_encoder(text_enc, **cls_encoder_args)))
feature_combiner = FeatureUnion(encoders)
X_train = feature_combiner.fit_transform(dt_train_bucket)
train_y = dataset_manager.get_label_numeric(dt_train_bucket)
del dt_train_bucket
# fit classifier and calibrate
cls = ClassifierFactory.get_classifier(cls_method.replace("_calibrated", ""), cls_args, random_state, min_cases_for_training, overall_class_ratio, binary=(False if "calibrate" in cls_method else True))
cls.fit(X_train, train_y)
del X_train, train_y
print("Trained model")
if "calibrate" in cls_method:
X_val = feature_combiner.transform(dt_val_bucket)
y_val = dataset_manager.get_label_numeric(dt_val_bucket)
del dt_val_bucket
cls = CalibratedClassifierCV(cls, cv="prefit", method='sigmoid')
cls.fit(X_val, np.array(y_val))
del X_val, y_val
print("Calibrated model")
# predict
X_test = feature_combiner.transform(dt_test_bucket)
test_y = dataset_manager.get_label_numeric(dt_test_bucket)
preds = cls.predict_proba(X_test)
if "calibrate" in cls_method:
preds = preds[:,1]
print("Predicted")
case_ids = list(dt_test_bucket.groupby(dataset_manager.case_id_col).first().index)
current_results = | pd.DataFrame({"dataset": dataset_name, "cls": cls_method, "params": method_name, "nr_events": nr_events, "predicted": preds, "actual": test_y, "case_id": case_ids}) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
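# The same nine records, interleaved so that they are no longer sorted by column id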
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index= | pd.Index(['g1'], dtype='object') | pandas.Index |
import util
import options
import os, sys
try:
import pandas as pd
except ImportError:
    print("Pandas not available\n")
# def plot_numba_CPU(app_name, cmds):
# util.chdir("CPU")
# df = pd.read_csv('perf_output.csv',names=['input_size','throughput'], index_col='input_size')
# # this is needed for setting the layout to show complete figure
# #from matplotlib import rcParams
# #rcParams.update({'figure.autolayout': True})
# bar_chart = df.plot.bar(legend=False,rot=45,fontsize=10)
# #bar_chart.set(xlabel='Input size', ylabel='Thoughput in input elements Processed per second')
# bar_chart.set_ylabel('Thoughput in input elements processed per second',fontsize=10)
# bar_chart.set_xlabel('Input size',fontsize=10)
# fig = bar_chart.get_figure()
# fig_filename = str(app_name) + "_numba_CPU_performance.pdf"
# fig.savefig(fig_filename,bbox_inches="tight")
# #print(df)
# return df.loc[cmds['ref_input'],'throughput']
# def plot_numba_GPU(app_name, cmds):
# util.chdir("GPU")
# df = pd.read_csv('perf_output.csv',names=['input_size','throughput'], index_col='input_size')
# # this is needed for setting the layout to show complete figure
# #from matplotlib import rcParams
# #rcParams.update({'figure.autolayout': True})
# bar_chart = df.plot.bar(legend=False,rot=45,fontsize=10)
# #bar_chart.set(xlabel='Input size', ylabel='Thoughput in input elements Processed per second')
# bar_chart.set_ylabel('Thoughput in input elements processed per second',fontsize=10)
# bar_chart.set_xlabel('Input size',fontsize=10)
# fig = bar_chart.get_figure()
# fig_filename = str(app_name) + "_numba_GPU_performance.pdf"
# fig.savefig(fig_filename,bbox_inches="tight")
# #print(df)
# return df.loc[cmds['ref_input'],'throughput']
# def plot_native(opts, all_plot_data):
# util.chdir("native")
# native_dir = os.getcwd();
# for app, cmds in opts.wls.wl_list.items():
# if cmds['execute'] is True:
# plot_data_entry = {}
# if app in all_plot_data:
# plot_data_entry = all_plot_data[app]
# util.chdir(app)
# app_dir = os.getcwd();
# if opts.platform == options.platform.cpu or opts.platform == options.platform.all:
# cpu_perf = get_runtime_data(app, cmds, "CPU")
# plot_data_entry['native_cpu'] = cpu_perf
# util.chdir(app_dir)
# if opts.platform == options.platform.gpu or opts.platform == options.platform.all:
# gpu_perf = get_runtime_data(app, cmds, "GPU")
# plot_data_entry['native_gpu'] = gpu_perf
# util.chdir(native_dir)
# all_plot_data[app] = plot_data_entry
def get_runtime_data(app_name, cmds, platform):
util.chdir(platform)
df = pd.read_csv('runtimes.csv',names=['input_size','runtime'], index_col='input_size')
# bar_chart = df.plot.bar(legend=False,rot=45,fontsize=10)
# bar_chart.set_ylabel('Thoughput in input elements processed per second',fontsize=10)
# bar_chart.set_xlabel('Input size',fontsize=10)
# fig = bar_chart.get_figure()
# fig_filename = str(app_name) + "_native_CPU_performance.pdf"
# fig.savefig(fig_filename,bbox_inches="tight")
#print(df)
return df.loc[cmds['ref_input'],'runtime']
def get_runtimes(opts, all_plot_data, impl):
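    # For every enabled workload, read <impl>/<app>/<CPU|GPU>/runtimes.csv and store the runtime of
    # the reference input size in all_plot_data[app] under the keys '<impl>_cpu' / '<impl>_gpu'.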
util.chdir(impl)
numba_dir = os.getcwd();
for app, cmds in opts.wls.wl_list.items():
if cmds['execute'] is True:
plot_data_entry = {}
if app in all_plot_data:
plot_data_entry = all_plot_data[app]
util.chdir(app)
app_dir = os.getcwd();
if opts.platform == options.platform.cpu or opts.platform == options.platform.all:
cpu_perf = get_runtime_data(app, cmds, "CPU")
plot_data_entry[impl + '_cpu'] = cpu_perf
util.chdir(app_dir)
if opts.platform == options.platform.gpu or opts.platform == options.platform.all:
gpu_perf = get_runtime_data(app, cmds, "GPU")
plot_data_entry[impl + '_gpu'] = gpu_perf
util.chdir(numba_dir)
all_plot_data[app] = plot_data_entry
def check_envvars_tools(opts):
if opts.analysis is not options.analysis.all and opts.analysis is not options.analysis.perf:
print("Plotting can be run only with option --analysis(-a) set to all or perf. Exiting")
sys.exit()
try:
import pandas
    except ImportError:
        print("Pandas not available. Plotting disabled\n")
sys.exit()
def plot_efficiency_graph(all_plot_data):
df = | pd.DataFrame.from_dict(all_plot_data, orient='index') | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
import os
import subprocess
import glob
import pandas as pd
def mkdir(dir):
if os.path.isdir(dir):
print("Directory %s exists" % dir)
return
try:
os.mkdir(dir)
except OSError:
print("Creation of the directory %s failed" % dir)
else:
print("Successfully created the directory %s " % dir)
def merge_bed(bed_list, bed_out):
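    # Concatenate the per-sample non-reference BED files, rewriting column 4 as "<family>|<sample>"
    # so the originating sample is preserved, e.g. a hypothetical line from sampleA.nonref.bed:
    #   chr2L  100  200  ROO  30  +   becomes   chr2L  100  200  ROO|sampleA  30  +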
with open(bed_out, "w") as output:
for bed in bed_list:
sample_name = os.path.basename(bed).replace(".nonref.bed", "")
with open(bed, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
chrom = entry[0]
start = entry[1]
end = entry[2]
family = entry[3]
score = entry[4]
strand = entry[5]
info = "|".join([family, sample_name])
out_line = "\t".join([chrom, start, end, info, score, strand])
output.write(out_line + "\n")
def filter_region_bed(bed_in, region_filter, bed_out):
with open(bed_out, "w") as output:
subprocess.call(
["bedtools", "intersect", "-a", bed_in, "-b", region_filter, "-u"],
stdout=output,
)
def filter_family_bed(bed_in, family_filter, bed_out, method):
families = set(family_filter.split(","))
with open(bed_out, "w") as output, open(bed_in, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
family = entry[3].split("|")[0]
if method == "include":
if family in families:
output.write(line)
else:
if family not in families:
output.write(line)
def sort_bed(bed_in, bed_out):
with open(bed_out, "w") as output:
subprocess.call(["bedtools", "sort", "-i", bed_in], stdout=output)
def cluster_bed(bed_in, bed_out):
window = 0
with open(bed_out, "w") as output:
subprocess.call(
["bedtools", "cluster", "-s", "-d", str(window), "-i", bed_in],
stdout=output,
)
def get_te_info(meta):
te_info_dict = dict()
with open(meta, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
te_info_dict[entry[0]] = entry[1]
return te_info_dict
def get_method_bed(
te_out_dirs,
filter_region,
outdir,
include_families,
exclude_families,
exclude_samples,
prefix,
):
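    # Pipeline: glob each run's */detect/*nonref.bed files, drop excluded samples, merge them into a
    # single BED, keep only calls inside the region filter, apply the include/exclude family filters,
    # then sort and cluster overlapping calls (bedtools cluster with a 0 bp window).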
output_prefix = prefix
pattern = "/**/detect/*nonref.bed"
bed_list = []
for te_out_dir in te_out_dirs:
bed_files = glob.glob(te_out_dir + pattern, recursive=True)
bed_list = bed_list + bed_files
# exclude samples
if exclude_samples is not None:
exclude_sample_list = exclude_samples.replace(" ", "").split(",")
new_bed_list = []
for bed in bed_list:
include = True
for exclude_sample in exclude_sample_list:
if exclude_sample in bed:
include = False
if include:
new_bed_list.append(bed)
bed_list = new_bed_list
# keep only nonref, merge per method
bed_merged = os.path.join(outdir, output_prefix + ".merge.bed")
merge_bed(bed_list=bed_list, bed_out=bed_merged)
# filter by region
bed_filtered = os.path.join(outdir, output_prefix + ".filter.bed")
filter_region_bed(
bed_in=bed_merged,
region_filter=filter_region,
bed_out=bed_filtered,
)
# filter by family
if include_families is not None:
bed_filtered_tmp = bed_filtered + ".tmp"
filter_family_bed(
bed_in=bed_filtered,
family_filter=include_families,
bed_out=bed_filtered_tmp,
method="include",
)
os.rename(bed_filtered_tmp, bed_filtered)
if exclude_families is not None:
bed_filtered_tmp = bed_filtered + ".tmp"
filter_family_bed(
bed_in=bed_filtered,
family_filter=exclude_families,
bed_out=bed_filtered_tmp,
method="exclude",
)
os.rename(bed_filtered_tmp, bed_filtered)
# sort and cluster, use method specific window
bed_sort = os.path.join(outdir, output_prefix + ".sort.bed")
sort_bed(bed_in=bed_filtered, bed_out=bed_sort)
bed_cluster = os.path.join(outdir, output_prefix + ".cluster.bed")
cluster_bed(bed_in=bed_sort, bed_out=bed_cluster)
# os.remove(bed_merged)
# os.remove(bed_filtered)
# os.remove(bed_sort)
return bed_cluster
def bed2matrix(bed, outgroup):
# from clustered bed to binary data matrix
header = [
"chr",
"start",
"end",
"info",
"score",
"strand",
"cluster",
]
df = | pd.read_csv(bed, delimiter="\t", names=header) | pandas.read_csv |
import warnings
from datetime import datetime
import pytest
import pandas as pd
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, custom_errors, create, conversion, conversion_rules
from mssql_dataframe.core.write import insert, _exceptions
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
self.insert = insert.insert(self.connection, autoadjust_sql_objects=True)
self.insert_meta = insert.insert(self.connection, include_metadata_timestamps=True, autoadjust_sql_objects=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
def test_insert_autoadjust_errors(sql):
table_name = "##test_insert_autoadjust_errors"
# create table with column for each conversion rule
columns = conversion_rules.rules['sql_type'].to_numpy()
columns = {'_'+x:x for x in columns}
sql.create.table(table_name, columns=columns)
# create dataframes for each conversion rule that should fail insert
boolean = [3]
exact_numeric = ['a', '2-1', 1.1, datetime.now()]
approximate_numeric = ['a', '2-1',datetime.now()]
date_time = ['a', 1, 1.1]
character_string = [1, datetime.now()]
dataframe = [
pd.DataFrame({'_bit': boolean}),
pd.DataFrame({'_tinyint': exact_numeric}),
pd.DataFrame({'_smallint': exact_numeric}),
pd.DataFrame({'_int': exact_numeric}),
pd.DataFrame({'_bigint': exact_numeric}),
pd.DataFrame({'_float': approximate_numeric}),
| pd.DataFrame({'_time': date_time}) | pandas.DataFrame |
import numpy as np
import pandas as pd
def mean(x):
return x.mean()
def std(x):
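    # For complex-valued series the spread of the real and imaginary parts is computed separately
    # and recombined as real_std + 1j * imag_std.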
if np.iscomplexobj(x.iloc[0]):
return x.agg(lambda y: np.real(y.to_numpy()).std() + 1.0j * np.imag(y.to_numpy()).std())
else:
return x.std()
def var(x):
if np.iscomplexobj(x.iloc[0]):
        return x.apply(lambda y: np.real(y.to_numpy()).var() + 1.0j * np.imag(y.to_numpy()).var())
else:
return x.var()
def quant25(x):
return x.quantile(0.25)
def quant75(x):
return x.quantile(0.75)
def secondmoment(x):
return pow(x, 2).mean()
def fourthmoment(x):
return pow(x, 4).mean()
class ExpectationValue:
def __init__(self, **kwargs):
self.data = kwargs.pop("data", None)
self.computed_expectation_values = None
self.errors = None
def compute_expectation_value(self, columns,
exp_values=['mean', 'max', 'min', 'median', 'quant25', 'quant75', 'std'],
transform="lin"):
self.computed_expectation_values = ExpectationValue.__evaluate_expectation_values(data=self.data,
computed_expectation_values=self.computed_expectation_values,
columns=columns, exp_values=exp_values, transform=transform)
@property
def expectation_values(self):
return self.computed_expectation_values
def compute_error_with_bootstrap(self, n_means_boostrap, number_of_measurements, columns, exp_values, running_parameter="default",
transform="lin"):
split_data = [ | pd.concat([tup[1], tup[1]]) | pandas.concat |
#!/usr/bin/env python3
import sys
import numpy as np
import pandas as pd
import os, shutil, zipfile
from numpy import array
import csv
from pandas import DataFrame
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from scipy.stats import entropy
import scipy as sc
from zipfile import ZipFile
import joblib
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation,Conv2D,MaxPooling2D,Flatten,Conv1D, GlobalMaxPooling1D,MaxPooling1D, Convolution2D,Reshape, InputLayer,LSTM, Embedding
from keras.optimizers import SGD
from sklearn import preprocessing
from keras.callbacks import EarlyStopping
from numpy import array
from keras.preprocessing import sequence
from keras.layers import Embedding
from keras.preprocessing.sequence import pad_sequences
from keras.layers import TimeDistributed
def load_sepsis_model():
# Load the saved model pickle file
Trained_model = joblib.load('saved_model.pkl')
return Trained_model
def get_sepsis_score(data1, Trained_model):
#Testing
t=1
df_test = np.array([], dtype=np.float64)
df_test1 = pd.DataFrame()
l = len(data1)
df_test = data1
df_test1 = pd.DataFrame(df_test)
df_test2 = df_test1
df_test2.columns = ['HR','O2Sat','Temp','SBP','MAP','DBP','Resp','EtCO2','BaseExcess','HCO3','FiO2','pH','PaCO2','SaO2','AST','BUN','Alkalinephos','Calcium','Chloride','Creatinine','Bilirubin_direct','Glucose','Lactate','Magnesium','Phosphate','Potassium','Bilirubin_total','TroponinI','Hct','Hgb','PTT','WBC','Fibrinogen','Platelets','Age','Gender','Unit1','Unit2','HospAdmTime','ICULOS']
#Forward fill missing values
df_test2.fillna(method='ffill', axis=0, inplace=True)
df_test3 = df_test2.fillna(0)
df_test = df_test3
df_test['ID'] = 0
DBP = pd.pivot_table(df_test,values='DBP',index='ID',columns='ICULOS')
O2Sat = pd.pivot_table(df_test,values='O2Sat',index='ID',columns='ICULOS')
Temp = pd.pivot_table(df_test,values='Temp',index='ID',columns='ICULOS')
RR = | pd.pivot_table(df_test,values='Resp',index='ID',columns='ICULOS') | pandas.pivot_table |
from datetime import date
import click
import numpy as np
from pandas import concat
from pandas import DataFrame
from pandas import json_normalize
from pandas import Series
from pandas import to_datetime
from py42 import exceptions
from py42.exceptions import Py42NotFoundError
from code42cli.bulk import generate_template_cmd_factory
from code42cli.bulk import run_bulk_process
from code42cli.click_ext.groups import OrderedGroup
from code42cli.click_ext.options import incompatible_with
from code42cli.click_ext.types import MagicDate
from code42cli.date_helper import round_datetime_to_day_end
from code42cli.date_helper import round_datetime_to_day_start
from code42cli.errors import Code42CLIError
from code42cli.file_readers import read_csv_arg
from code42cli.options import format_option
from code42cli.options import sdk_options
from code42cli.output_formats import DataFrameOutputFormatter
from code42cli.output_formats import OutputFormat
from code42cli.output_formats import OutputFormatter
from code42cli.worker import create_worker_stats
@click.group(cls=OrderedGroup)
@sdk_options(hidden=True)
def devices(state):
"""Manage devices within your Code42 environment."""
pass
device_guid_argument = click.argument(
"device-guid", type=str, callback=lambda ctx, param, arg: _verify_guid_type(arg),
)
def change_device_name_option(help_msg):
return click.option(
"--change-device-name",
required=False,
is_flag=True,
default=False,
help=help_msg,
)
DATE_FORMAT = "%Y-%m-%d"
purge_date_option = click.option(
"--purge-date",
required=False,
type=click.DateTime(formats=[DATE_FORMAT]),
default=None,
help="The date on which the archive should be purged from cold storage in yyyy-MM-dd format. "
"If not provided, the date will be set according to the appropriate organization settings.",
)
@devices.command()
@device_guid_argument
@change_device_name_option(
"Prepend 'deactivated_<current_date>' to the name of the device if deactivation is successful."
)
@purge_date_option
@sdk_options()
def deactivate(state, device_guid, change_device_name, purge_date):
"""Deactivate a device within Code42. Requires the device GUID to deactivate."""
_deactivate_device(state.sdk, device_guid, change_device_name, purge_date)
@devices.command()
@device_guid_argument
@sdk_options()
def reactivate(state, device_guid):
"""Reactivate a device within Code42. Requires the device GUID to reactivate."""
_reactivate_device(state.sdk, device_guid)
def _deactivate_device(sdk, device_guid, change_device_name, purge_date):
try:
device = _change_device_activation(sdk, device_guid, "deactivate")
except exceptions.Py42BadRequestError:
raise Code42CLIError(f"The device with GUID '{device_guid}' is in legal hold.")
if purge_date:
_update_cold_storage_purge_date(sdk, device_guid, purge_date)
if change_device_name and not device.data["name"].startswith("deactivated_"):
_change_device_name(
sdk,
device_guid,
"deactivated_"
+ date.today().strftime("%Y-%m-%d")
+ "_"
+ device.data["name"],
)
def _reactivate_device(sdk, device_guid):
_change_device_activation(sdk, device_guid, "reactivate")
def _change_device_activation(sdk, device_guid, cmd_str):
try:
device = sdk.devices.get_by_guid(device_guid)
device_id = device.data["computerId"]
if cmd_str == "reactivate":
sdk.devices.reactivate(device_id)
elif cmd_str == "deactivate":
sdk.devices.deactivate(device_id)
return device
except exceptions.Py42NotFoundError:
raise Code42CLIError(f"The device with GUID '{device_guid}' was not found.")
except exceptions.Py42ForbiddenError:
raise Code42CLIError(
f"Unable to {cmd_str} the device with GUID '{device_guid}'."
)
def _verify_guid_type(device_guid):
if device_guid is None:
return
try:
int(device_guid)
return device_guid
except ValueError:
raise Code42CLIError("Not a valid GUID.")
def _update_cold_storage_purge_date(sdk, guid, purge_date):
archives_response = sdk.archive.get_all_by_device_guid(guid)
archive_guid_list = [
archive["archiveGuid"]
for page in archives_response
for archive in page["archives"]
if archive["format"] != "ARCHIVE_V2"
]
for archive_guid in archive_guid_list:
sdk.archive.update_cold_storage_purge_date(
archive_guid, purge_date.strftime("%Y-%m-%d")
)
def _change_device_name(sdk, guid, name):
device_settings = sdk.devices.get_settings(guid)
device_settings.name = name
sdk.devices.update_settings(device_settings)
@devices.command()
@device_guid_argument
@sdk_options()
def show(state, device_guid):
"""Print individual device details. Requires device GUID."""
formatter = OutputFormatter(OutputFormat.TABLE, _device_info_keys_map())
backup_set_formatter = OutputFormatter(OutputFormat.TABLE, _backup_set_keys_map())
device_info = _get_device_info(state.sdk, device_guid)
formatter.echo_formatted_list([device_info])
backup_usage = device_info.get("backupUsage")
if backup_usage:
click.echo()
backup_set_formatter.echo_formatted_list(backup_usage)
def _device_info_keys_map():
return {
"name": "Name",
"osHostname": "Hostname",
"guid": "GUID",
"status": "Status",
"lastConnected": "Last Connected Date",
"productVersion": "Code42 Version",
"osName": "Operating System",
"osVersion": "Operating System Version",
}
def _backup_set_keys_map():
return {
"targetComputerName": "Destination",
"lastBackup": "Last Backup Activity",
"lastCompleted": "Last Completed Backup",
"archiveBytes": "Archive Size in Bytes",
"archiveGuid": "Archive GUID",
}
def _get_device_info(sdk, device_guid):
return sdk.devices.get_by_guid(device_guid, include_backup_usage=True).data
active_option = click.option(
"--active",
is_flag=True,
help="Limits results to only active devices.",
default=None,
)
inactive_option = click.option(
"--inactive",
is_flag=True,
help="Limits results to only deactivated devices.",
cls=incompatible_with("active"),
)
org_uid_option = click.option(
"--org-uid",
required=False,
type=str,
default=None,
help="Limit devices to only those in the organization you specify. "
"Note that child organizations will be included.",
)
include_usernames_option = click.option(
"--include-usernames",
required=False,
type=bool,
default=False,
is_flag=True,
help="Add the username associated with a device to the output.",
)
@devices.command(name="list")
@active_option
@inactive_option
@org_uid_option
@click.option(
"--include-backup-usage",
required=False,
type=bool,
default=False,
is_flag=True,
help="Return backup usage information for each device (may significantly lengthen the size "
"of the return).",
)
@include_usernames_option
@click.option(
"--include-settings",
required=False,
type=bool,
default=False,
is_flag=True,
help="Include device settings in output.",
)
@click.option(
"--include-legal-hold-membership",
required=False,
type=bool,
default=False,
is_flag=True,
help="Include legal hold membership in output.",
)
@click.option(
"--include-total-storage",
required=False,
type=bool,
default=False,
is_flag=True,
help="Include backup archive count and total storage in output.",
)
@click.option(
"--exclude-most-recently-connected",
type=int,
help="Filter out the N most recently connected devices per user. "
"Useful for identifying duplicate and/or replaced devices that are no longer needed across "
"an environment. If a user has 2 devices and N=1, the one device with the most recent "
"'lastConnected' date will not show up in the result list.",
)
@click.option(
"--last-connected-before",
type=MagicDate(rounding_func=round_datetime_to_day_start),
help=f"Include devices only when the 'lastConnected' field is after the provided value. {MagicDate.HELP_TEXT}",
)
@click.option(
"--last-connected-after",
type=MagicDate(rounding_func=round_datetime_to_day_end),
help="Include devices only when 'lastConnected' field is after the provided value. "
"Argument format options are the same as --last-connected-before.",
)
@click.option(
"--created-before",
type=MagicDate(rounding_func=round_datetime_to_day_start),
help="Include devices only when 'creationDate' field is less than the provided value. "
"Argument format options are the same as --last-connected-before.",
)
@click.option(
"--created-after",
type=MagicDate(rounding_func=round_datetime_to_day_end),
help="Include devices only when 'creationDate' field is greater than the provided value. "
"Argument format options are the same as --last-connected-before.",
)
@format_option
@sdk_options()
def list_devices(
state,
active,
inactive,
org_uid,
include_backup_usage,
include_usernames,
include_settings,
include_legal_hold_membership,
include_total_storage,
exclude_most_recently_connected,
last_connected_after,
last_connected_before,
created_after,
created_before,
format,
):
"""Get information about many devices."""
if inactive:
active = False
columns = [
"computerId",
"guid",
"name",
"osHostname",
"status",
"lastConnected",
"creationDate",
"productVersion",
"osName",
"osVersion",
"userUid",
]
df = _get_device_dataframe(
state.sdk,
columns,
active,
org_uid,
(include_backup_usage or include_total_storage),
)
if exclude_most_recently_connected:
most_recent = (
df.sort_values(["userUid", "lastConnected"], ascending=False)
.groupby("userUid")
.head(exclude_most_recently_connected)
)
df = df.drop(most_recent.index)
if last_connected_after:
df = df.loc[to_datetime(df.lastConnected) > last_connected_after]
if last_connected_before:
df = df.loc[to_datetime(df.lastConnected) < last_connected_before]
if created_after:
df = df.loc[to_datetime(df.creationDate) > created_after]
if created_before:
df = df.loc[to_datetime(df.creationDate) < created_before]
if include_total_storage:
df = _add_storage_totals_to_dataframe(df, include_backup_usage)
if include_settings:
df = _add_settings_to_dataframe(state.sdk, df)
if include_usernames:
df = _add_usernames_to_device_dataframe(state.sdk, df)
if include_legal_hold_membership:
df = _add_legal_hold_membership_to_device_dataframe(state.sdk, df)
formatter = DataFrameOutputFormatter(format)
formatter.echo_formatted_dataframes(df)
def _add_legal_hold_membership_to_device_dataframe(sdk, df):
columns = ["legalHold.legalHoldUid", "legalHold.name", "user.userUid"]
legal_hold_member_dataframe = (
json_normalize(list(_get_all_active_hold_memberships(sdk)))[columns]
.groupby(["user.userUid"])
.agg(",".join)
.rename(
{
"legalHold.legalHoldUid": "legalHoldUid",
"legalHold.name": "legalHoldName",
},
axis=1,
)
)
df = df.merge(
legal_hold_member_dataframe,
how="left",
left_on="userUid",
right_on="user.userUid",
)
df.loc[df["status"] == "Deactivated", ["legalHoldUid", "legalHoldName"]] = np.nan
return df
def _get_all_active_hold_memberships(sdk):
for page in sdk.legalhold.get_all_matters(active=True):
for matter in page["legalHolds"]:
for _page in sdk.legalhold.get_all_matter_custodians(
legal_hold_uid=matter["legalHoldUid"], active=True
):
yield from _page["legalHoldMemberships"]
def _get_device_dataframe(
sdk, columns, active=None, org_uid=None, include_backup_usage=False
):
devices_generator = sdk.devices.get_all(
active=active, include_backup_usage=include_backup_usage, org_uid=org_uid
)
devices_list = []
if include_backup_usage:
columns.append("backupUsage")
for page in devices_generator:
devices_list.extend(page["computers"])
return DataFrame.from_records(devices_list, columns=columns)
def _add_settings_to_dataframe(sdk, device_dataframe):
macos_guids = device_dataframe.loc[
device_dataframe["osName"] == "mac", "guid"
].values
def handle_row(guid):
try:
full_disk_access_status = sdk.devices.get_agent_full_disk_access_state(
guid
).data[
"value"
] # returns 404 error if device isn't a Mac or doesn't have full disk access
except Py42NotFoundError:
full_disk_access_status = False
return {
"guid": guid,
"full disk access status": full_disk_access_status,
}
result_list = DataFrame.from_records(
run_bulk_process(
handle_row, macos_guids, progress_label="Getting device settings"
)
)
try:
return device_dataframe.merge(result_list, how="left", on="guid")
except KeyError:
return device_dataframe
def _add_usernames_to_device_dataframe(sdk, device_dataframe):
users_generator = sdk.users.get_all()
users_list = []
for page in users_generator:
users_list.extend(page["users"])
users_dataframe = DataFrame.from_records(
users_list, columns=["username", "userUid"]
)
return device_dataframe.merge(users_dataframe, how="left", on="userUid")
def _add_storage_totals_to_dataframe(df, include_backup_usage):
df[["archiveCount", "totalStorageBytes"]] = df["backupUsage"].apply(
_break_backup_usage_into_total_storage
)
if not include_backup_usage:
df = df.drop("backupUsage", axis=1)
return df
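# Usage sketch (illustrative, synthetic data; the archiveFormat value "V1" is made up):
# _break_backup_usage_into_total_storage (defined below) returns a two-element Series per row,
# which .apply(...) expands into the archiveCount/totalStorageBytes columns.
#   >>> demo = DataFrame({"backupUsage": [[{"archiveFormat": "V1", "archiveBytes": 10},
#   ...                                    {"archiveFormat": "ARCHIVE_V2", "archiveBytes": 99}]]})
#   >>> demo[["archiveCount", "totalStorageBytes"]] = demo["backupUsage"].apply(
#   ...     _break_backup_usage_into_total_storage)   # -> archiveCount 1, totalStorageBytes 10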
def _break_backup_usage_into_total_storage(backup_usage):
total_storage = 0
archive_count = 0
for archive in backup_usage:
if archive["archiveFormat"] != "ARCHIVE_V2":
archive_count += 1
total_storage += archive["archiveBytes"]
return | Series([archive_count, total_storage]) | pandas.Series |
import os
from operator import itemgetter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.tree import DecisionTreeRegressor
from sklearn import preprocessing
# from sklearn.preprocessing import Imputer
from pandas.plotting import scatter_matrix
from sklearn.preprocessing import RobustScaler, StandardScaler, LabelEncoder, MinMaxScaler, OneHotEncoder, \
LabelBinarizer
from sklearn.metrics import mean_squared_error, accuracy_score, mean_absolute_error
from sklearn.model_selection import KFold, cross_val_score
from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV, KFold, cross_val_predict, \
StratifiedKFold, train_test_split, learning_curve, ShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn import model_selection, preprocessing
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV, KFold, cross_val_predict, \
StratifiedKFold, train_test_split, learning_curve, ShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, ShuffleSplit
from sklearn.metrics import confusion_matrix, precision_score, recall_score, accuracy_score, f1_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_curve, average_precision_score, auc
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from mlxtend.plotting import plot_learning_curves
from mlxtend.preprocessing import shuffle_arrays_unison
import keras
import keras.utils
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras import regularizers,optimizers
print(os.getcwd())
print("Modules imported \n")
import os
def data_loader():
# Load MIMIC2 data
data = pd.read_csv('mimic3d.csv')
print("With id", data.shape)
    data_full = data.drop('hadm_id', axis=1)
print("No id", data_full.shape)
print(data_full.shape)
data_full.head(10)
# Label = LOS
y = data_full['LOSgroupNum']
    X = data_full.drop('LOSgroupNum', axis=1)
    X = X.drop('LOSdays', axis=1)
    X = X.drop('ExpiredHospital', axis=1)
    X = X.drop('AdmitDiagnosis', axis=1)
    X = X.drop('AdmitProcedure', axis=1)
    X = X.drop('marital_status', axis=1)
    X = X.drop('ethnicity', axis=1)
    X = X.drop('religion', axis=1)
    X = X.drop('insurance', axis=1)
print("y - Labels", y.shape)
print("X - No Label No id ", X.shape)
print(X.columns)
data_full.groupby('LOSgroupNum').size().plot.bar()
plt.show()
data_full.groupby('admit_type').size().plot.bar()
plt.show()
data_full.groupby('admit_location').size().plot.bar()
plt.show()
# MAP Text to Numerical Data
# Use one-hot-encoding to convert categorical features to numerical
print(X.shape)
categorical_columns = [
'gender',
'admit_type',
'admit_location'
]
for col in categorical_columns:
# if the original column is present replace it with a one-hot
if col in X.columns:
one_hot_encoded = pd.get_dummies(X[col])
X = X.drop(col, axis=1)
X = X.join(one_hot_encoded, lsuffix='_left', rsuffix='_right')
print(X.shape)
print(data_full.shape)
print(X.shape)
# XnotNorm = np.array(X.copy())
XnotNorm = X.copy()
print('XnotNorm ', XnotNorm.shape)
ynotNorm = y.copy()
print('ynotNorm ', ynotNorm.shape)
# Normalize X
x = XnotNorm.values # returns a numpy array
scaler = preprocessing.StandardScaler()
x_scaled = scaler.fit_transform(x)
XNorm = | pd.DataFrame(x_scaled, columns=XnotNorm.columns) | pandas.DataFrame |
"""
SIR 3S Logfile Utilities (short: Lx)
"""
__version__='192.168.3.11.dev1'
import os
import sys
import logging
logger = logging.getLogger(__name__)
import argparse
import unittest
import doctest
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import py7zr
import pandas as pd
import h5py
import subprocess
import csv
import glob
import warnings
#warnings.simplefilter(action='ignore', category=PerformanceWarning)
# pd.set_option("max_rows", None)
# pd.set_option("max_columns", None)
# pd.reset_option('max_rows')
# ...
class LxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def fTCCast(x):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
v=x
try:
if x in ['true','True']:
v=1
elif x in ['false','False','']:
v=0
else:
try:
v = float(x)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float schlaegt fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
try:
v = pd.to_numeric(x,errors='raise',downcast='float')
#logStrTmp="{:s}{!s:s}: Konvertierung mit pd.to_numeric liefert: {!s:s}".format(logStr,x,v)
#logger.debug(logStrTmp)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float mit pd.to_numeric schlaegt auch fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
#x='2021-04-20 10:56:12.000'
#t = pd.Timestamp(x)
#t # Timestamp('2021-04-20 10:56:12')
#i=int(t.to_datetime64())/1000000000
#i # 1618916172.0
#pd.to_datetime(i,unit='s',errors='coerce'): Timestamp('2021-04-20 10:56:12')
try:
t = pd.Timestamp(x)
i=int(t.to_datetime64())/1000000000
v=pd.to_numeric(i,errors='raise',downcast='float')
except Exception as e:
logStrTmp="{:s}{!s:s}: Konvertierung zu float (mit pd.to_numeric) schlaegt (auch nach Annahme vaulue=Zeitstring) fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.debug(logStrTmp)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return v
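# Usage sketch (illustrative): fTCCast normalises raw log 'Value' strings to numbers; booleans
# become 1/0 and timestamp strings fall back to epoch seconds.
#   >>> fTCCast('true')                     # -> 1
#   >>> fTCCast('')                         # -> 0
#   >>> fTCCast('47.11')                    # -> 47.11
#   >>> fTCCast('2021-04-20 10:56:12.000')  # -> 1618916172.0 (epoch seconds)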
def getTCsOPCDerivative(TCsOPC,col,shiftSize,windowSize,fct=None):
"""
returns a df
index: ProcessTime
cols:
col
dt
dValue
dValueDt
dValueDtRollingMean
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
mDf=pd.DataFrame()
try:
s=TCsOPC[col].dropna()
mDf=pd.DataFrame(s)
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
mDf['dValueDtRollingMean']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return mDf
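# Usage sketch (illustrative; TCsOPC and the column name are placeholders, e.g. taken from
# AppLog.getTCsFromDf): computes a smoothed numerical derivative of one OPC time curve.
#   >>> mDf = getTCsOPCDerivative(TCsOPC, col='Objects. ... .In.MW_AV', shiftSize=1, windowSize=60)
#   >>> mDf[['dValueDt', 'dValueDtRollingMean']].plot()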
logFilenamePattern='([0-9]+)(_)+([0-9]+)(\.log)' # group(3) ist Postfix und Nr.
logFilenameHeadPattern='([0-9,_]+)(\.log)' # group(1) ist Head und H5-Key
# not all IDs are captured by the regular expression pID
# those are post-processed with pID2, getDfFromODIHelper and in getDfFromODI
pID=re.compile('(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)\.(?P<B>[a-z,A-Z,0-9,_]+)\.(?P<C1>[a-z,A-Z,0-9]+)_(?P<C2>[a-z,A-Z,0-9]+)_(?P<C3>[a-z,A-Z,0-9]+)_(?P<C4>[a-z,A-Z,0-9]+)_(?P<C5>[a-z,A-Z,0-9]+)(?P<C6>_[a-z,A-Z,0-9]+)?(?P<C7>_[a-z,A-Z,0-9]+)?\.(?P<D>[a-z,A-Z,0-9,_]+)\.(?P<E>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?')
pID2='(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?'
def getDfFromODIHelper(row,col,colCheck,pID2=pID2):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if not pd.isnull(row[colCheck]):
res= row[col]
resStr='ColCheckOk'
elif pd.isnull(row[col]):
res=re.search(pID2,row['ID']).group(col)
if res != None:
resStr='ColNowOk'
else:
resStr='ColStillNotOk'
else:
res = row[col]
resStr='ColWasOk'
except:
res = row[col]
resStr='ERROR'
finally:
if resStr not in ['ColCheckOk','ColNowOk']:
logger.debug("{:s}col: {:s} resStr: {:s} row['ID']: {:s} res: {:s}".format(logStr,col, resStr,row['ID'],str(res)))
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return res
def getDfFromODI(ODIFile,pID=pID):
"""
returns a defined df from ODIFile
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfID=None
try:
df=pd.read_csv(ODIFile,delimiter=';')
s = pd.Series(df['ID'].unique())
dfID=s.str.extract(pID.pattern,expand=True)
dfID['ID']=s
dfC=dfID['C1']+'_'+dfID['C2']+'_'+dfID['C3']+'_'+dfID['C4']+'_'+dfID['C5']+'_'+dfID['C6']#+'_'+dfID['C7']
dfID.loc[:,'C']=dfC.values
dfID['C']=dfID.apply(lambda row: row['C']+'_'+row['C7'] if not pd.isnull(row['C7']) else row['C'],axis=1)
dfID=dfID[['ID','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
for col in ['Prae','Post','A']:
dfID[col]=dfID.apply(lambda row: getDfFromODIHelper(row,col,'A'),axis=1)
dfID.sort_values(by=['ID'], axis=0,ignore_index=True,inplace=True)
dfID.set_index('ID',verify_integrity=True,inplace=True)
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','Post']='.EIN'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','A']='Objects'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','B']='3S_XYZ_PUMPE'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','C']='3S_XYZ_GSI_01'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','D']='Out'
#dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN',:]
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','Post']='.SOLLW'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','A']='Objects'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','B']='3S_XYZ_RSCHIEBER'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','C']='3S_XYZ_PCV_01'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','D']='Out'
#dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW',:]
dfID['yUnit']=dfID.apply(lambda row: getDfFromODIHelperyUnit(row),axis=1)
dfID['yDesc']=dfID.apply(lambda row: getDfFromODIHelperyDesc(row),axis=1)
dfID=dfID[['yUnit','yDesc','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfID
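# Usage sketch (illustrative; the ODI filename is a placeholder): builds the ID lookup df,
# indexed by the full ID, with the ID components A..E plus yUnit/yDesc as columns.
#   >>> dfID = getDfFromODI(r'Example.ODI.csv')
#   >>> dfID[dfID['E'] == 'AL_S'].head()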
def addInitvalueToDfFromODI(INITFile,dfID):
"""
returns dfID extended with new Cols Initvalue and NumOfInits
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDext=dfID
try:
df=pd.read_csv(INITFile,delimiter=';',header=None,names=['ID','Value'])#,index_col=0)
dfGrped=df.groupby(by=['ID'])['Value'].agg(['count','min','max','mean','last'])
dfIDext=dfID.merge(dfGrped,left_index=True,right_index=True,how='left').filter(items=dfID.columns.to_list()+['last','count']).rename(columns={'last':'Initvalue','count':'NumOfInits'})
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDext
def fODIMatch(dfODI,TYPE=None,OBJTYPE=None,NAME1=None,NAME2=None):
df=dfODI
if TYPE != None:
df=df[df['TYPE']==TYPE]
if OBJTYPE != None:
df=df[df['OBJTYPE']==OBJTYPE]
if NAME1 != None:
df=df[df['NAME1']==NAME1]
if NAME2 != None:
df=df[df['NAME2']==NAME2]
return df
def fODIFindAllSchieberSteuerungsIDs(dfODI,NAME1=None,NAME2=None): # dfODI: pd.read_csv(ODI,delimiter=';')
df=fODIMatch(dfODI,TYPE='OL_2',OBJTYPE='VENT',NAME1=NAME1,NAME2=NAME2)
return sorted(list(df['ID'].unique())+[ID for ID in df['REF_ID'].unique() if not pd.isnull(ID)])
def fODIFindAllZeilenWithIDs(dfODI,IDs):
return dfODI[dfODI['ID'].isin(IDs) | dfODI['REF_ID'].isin(IDs)]
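# Usage sketch (illustrative): chain the two helpers above to pull every ODI row belonging to
# the valve ("Schieber") control IDs.
#   >>> dfODI = pd.read_csv(ODIFile, delimiter=';')   # ODIFile is a placeholder path
#   >>> IDs = fODIFindAllSchieberSteuerungsIDs(dfODI)
#   >>> fODIFindAllZeilenWithIDs(dfODI, IDs)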
def getDfFromODIHelperyUnit(row):
"""
returns Unit
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
unit=None
try:
if row['E'] in ['AL_S','SB_S']:
unit='[-]'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
unit='[Nm³/h]'
        elif row['E'] in ['AC_AV']:
unit='[mm/s²]'
else:
unit='TBD in Lx'
except:
unit='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return unit
def getDfFromODIHelperyDesc(row):
"""
returns Desc
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
desc=None
try:
if row['E'] in ['AL_S','SB_S']:
desc='Status'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
desc='Fluss'
        elif row['E'] in ['AC_AV']:
desc='Beschleunigung'
else:
desc='TBD in Lx'
except:
desc='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return desc
def getDfIDUniqueCols(dfID):
"""
returns df with uniques
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDUniqueCols=pd.DataFrame()
try:
        # determine the column with the largest number of distinct values
        lenMax=0
        colMax=''
        # loop over all columns
for idx,col in enumerate(dfID):
s=pd.Series(dfID[col].unique())
if len(s) > lenMax:
lenMax=len(s)
colMax=col
s=pd.Series(dfID[colMax].unique(),name=colMax)
s.sort_values(inplace=True)
s=pd.Series(s.values,name=colMax)
dfIDUniqueCols=pd.DataFrame(s)
        # loop over all remaining columns
        for idx,col in enumerate([col for col in dfID.columns if col != colMax]):
            # build s from the unique values
            s=pd.Series(dfID[col].unique(),name=col)
            # sort s
            s.sort_values(inplace=True)
s=pd.Series(s.values,name=col)
dfIDUniqueCols=pd.concat([dfIDUniqueCols,s],axis=1)
dfIDUniqueCols=dfIDUniqueCols[dfID.columns]
except:
logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDUniqueCols
def getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S',dfID=None,matchCols=['B','C1','C2','C3','C4','C5','D'],any=False):
"""
returns IDs matching ID
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
IDsMatching=[]
s=dfID.loc[ID,:]
for ID,row in dfID.iterrows():
match=True
for col in [col for col in row.index.values if col in matchCols]:
#if str(row[col])!=str(s[col]):
if row[col]!=s[col]:
match=False
break
else:
if any:
break
if match:
IDsMatching.append(ID)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
#except:
# logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return sorted(IDsMatching)
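# Usage sketch (illustrative; dfID as returned by getDfFromODI): all channels of one segment
# share the matchCols components, so matching on them yields the sibling IDs (AL_S, SB_S, ...).
#   >>> getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S', dfID=dfID)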
def getLDSResVecDf(
ID # ResVec-Defining-Channel; i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.AL_S / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.AL_S
,dfID
,TCsLDSResDf
,matchCols # i.e. ['B','C1','C2','C3','C4','C5','C6','D'] for Segs; i.e. ['B','C','D'] for Drks
):
"""
returns a df with LDSResChannels as columns (AL_S, ...); derived by Filtering columns from TCsLDSResDf and renaming them
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
IDs=getIDsFromID(ID=ID,dfID=dfID,matchCols=matchCols)
dfFiltered=TCsLDSResDf.filter(items=IDs)
colDct={}
for col in dfFiltered.columns:
m=re.search(pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except:
logger.error("{0:s}".format(logStr))
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
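# Usage sketch (illustrative; dfID from getDfFromODI, TCsLDSResDf e.g. TCsdfLDSRes1 from
# AppLog.getTCsFromDf): collects all result-vector channels of one segment into one df.
#   >>> dfResVec = getLDSResVecDf(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S',
#   ...                           dfID=dfID, TCsLDSResDf=TCsdfLDSRes1,
#   ...                           matchCols=['B','C1','C2','C3','C4','C5','C6','D'])
#   >>> dfResVec['AL_S'].value_counts()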
def fGetFirstAndLastValidIdx(df):
"""
returns (tFirst,tLast)
"""
for idx,col in enumerate(df.columns):
tF=df[col].first_valid_index()
tL=df[col].last_valid_index()
if idx==0:
tFirst=tF
tLast=tL
else:
if tF < tFirst:
tFirst=tF
if tL > tLast:
tLast=tL
return (tFirst,tLast)
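# Usage sketch (illustrative, synthetic data): earliest first and latest last valid index over
# all columns of a time-curve df.
#   >>> idx = pd.date_range('2021-01-01', periods=4, freq='H')
#   >>> df = pd.DataFrame({'a': [None, 1, 2, None], 'b': [1, None, None, 3]}, index=idx)
#   >>> fGetFirstAndLastValidIdx(df)   # -> (Timestamp('2021-01-01 00:00:00'), Timestamp('2021-01-01 03:00:00'))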
def fGetIDSets(
dfID
,divNr #'7'
,pipelineNrLst #['43','44']
    ,fctIn=None # function of an ID that returns False if the ID should (after all) not be part of the sets
    ):
    # returns Dct: key: name of an ID set; value: the corresponding IDs
IDSets={}
IDs=[]
for ID in sorted(dfID.index.unique()):
m=re.search(pID,ID)
if m != None:
C1= m.group('C1')
C2= m.group('C2')
C3= m.group('C3')
C4= m.group('C4')
C5= m.group('C5')
if C1 in [divNr] and C3 in pipelineNrLst: # u.a. SEG ErgVecs
IDs.append(ID)
elif C2 in [divNr] and C4 in pipelineNrLst:
IDs.append(ID)
elif C3 in [divNr] and C5 in pipelineNrLst: # FT, PTI, etc.
IDs.append(ID)
if fctIn != None:
IDs=[ID for ID in IDs if fctIn(ID)]
IDSets['IDs']=IDs
IDsAlarm=[ID for ID in IDs if re.search(pID,ID).group('E') == 'AL_S']
IDSets['IDsAlarm']=IDsAlarm
IDsAlarmSEG=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsAlarmSEG']=IDsAlarmSEG
IDsAlarmDruck=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsAlarmDruck']=IDsAlarmDruck
IDsStat=[ID for ID in IDs if re.search(pID,ID).group('E') == 'STAT_S']
IDSets['IDsStat']=IDsStat
IDsStatSEG=[ID for ID in IDsStat if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsStatSEG']=IDsStatSEG
IDsStatDruck=[ID for ID in IDsStat if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsStatDruck']=IDsStatDruck
###
IDsSb=[ID for ID in IDs if re.search(pID,ID).group('E') == 'SB_S']
IDSets['IDsSb']=IDsSb
IDsSbSEG=[ID for ID in IDsSb if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsSbSEG']=IDsSbSEG
IDsSbDruck=[ID for ID in IDsSb if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsSbDruck']=IDsSbDruck
###
IDsZHK=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZHKNR_S']
IDSets['IDsZHK']=IDsZHK
IDsZHKSEG=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsZHKSEG']=IDsZHKSEG
IDsZHKDruck=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsZHKDruck']=IDsZHKDruck
IDsFT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'FT']
IDSets['IDsFT']=IDsFT
IDsPT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'PTI']
IDSets['IDsPT']=IDsPT
IDsPT_BCIND=[ID for ID in IDs if re.search(pID,ID).group('C5') == 'PTI' and re.search(pID,ID).group('E') == 'BCIND_S' ]
IDSets['IDsPT_BCIND']=IDsPT_BCIND
### Schieber
IDsZUST=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZUST']
IDsZUST=sorted(IDsZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDsZUST']=IDsZUST
IDs_3S_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == '3S_FBG_ESCHIEBER']
IDs_3S_XYZ_ESCHIEBER=sorted(IDs_3S_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C6'))
IDSets['IDs_3S_XYZ_ESCHIEBER']=IDs_3S_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == 'FBG_ESCHIEBER']
IDs_XYZ_ESCHIEBER=sorted(IDs_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C5')) #
IDSets['IDs_XYZ_ESCHIEBER']=IDs_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER_Ohne_ZUST=[ID for ID in IDs_XYZ_ESCHIEBER if re.search(pID,ID).group('E') != 'ZUST']
IDs_XYZ_ESCHIEBER_Ohne_ZUST=sorted(IDs_XYZ_ESCHIEBER_Ohne_ZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDs_XYZ_ESCHIEBER_Ohne_ZUST']=IDs_XYZ_ESCHIEBER_Ohne_ZUST
IDsSchieberAlle=IDsZUST+IDs_XYZ_ESCHIEBER_Ohne_ZUST+IDs_3S_XYZ_ESCHIEBER
IDSets['IDsSchieberAlle']=IDsSchieberAlle
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlle if re.search('LAEUFT$',ID) == None]
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlleOhneLAEUFT if re.search('LAEUFT_NICHT$',ID) == None]
IDSets['IDsSchieberAlleOhneLAEUFT']=IDsSchieberAlleOhneLAEUFT
return IDSets
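# Usage sketch (illustrative; divNr/pipelineNrLst follow the hints in the signature comments):
#   >>> IDSets = fGetIDSets(dfID, divNr='7', pipelineNrLst=['43','44'])
#   >>> sorted(IDSets.keys())
#   >>> IDSets['IDsAlarmSEG']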
h5KeySep='/'
def fValueFct(x):
return pd.to_numeric(x,errors='ignore',downcast='float')
class AppLog():
"""
SIR 3S App Log (SQC Log)
Maintains a H5-File.
Existing H5-File will be deleted (if not initialized with h5File=...).
H5-Keys are:
* init
* lookUpDf
* lookUpDfZips (if initialized with zip7Files=...)
* Logfilenames praefixed by Log without extension
Attributes:
* h5File
* lookUpDf
zipName
logName
FirstTime (ScenTime - not #LogTime)
LastTime (ScenTime - mot #LogTime)
* lookUpDfZips
"""
    TCsdfOPCFill=False # if True, the NaNs in TCsdfOPC are filled; default: False
@classmethod
def getTCsFromDf(cls,df,dfID=pd.DataFrame(),TCsdfOPCFill=TCsdfOPCFill):
"""
returns several TC-dfs from df
        dfs are processed as in extractTCsToH5s; see there
Args:
* df: a df with Log-Data
* columns: ['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']
* dfID
* index: ID
                * required only if the IDs shall be split into Res1 and Res2
* TCsdfOPCFill: if True (default): fill NaNs in this df
Time curve dfs: cols:
* Time (TCsdfOPC: ProcessTime, other: ScenTime)
* ID
* Value
Time curve dfs:
* TCsdfOPC
* TCsSirCalc
* TCsLDSIn
* TCsLDSRes (dfID empty) or TCsLDSRes1, TCsLDSRes2
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if not dfID.empty:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2=pd.DataFrame()
else:
TCsdfLDSRes=pd.DataFrame()
if not dfID.empty:
df=df.merge(dfID,how='left',left_on='ID',right_index=True,suffixes=('','_r'))
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfOPC ...'))
TCsdfOPC=df[(df['SubSystem'].str.contains('^OPC'))
### & ~(df['Value'].isnull()) # ueberfluessig, wenn df dies bereits erfuellt
][['ProcessTime','ID','Value']].pivot_table(index='ProcessTime', columns='ID', values='Value',aggfunc='last')
if TCsdfOPCFill:
for col in TCsdfOPC.columns:
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='ffill')
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='bfill')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfSirCalc ...'))
TCsdfSirCalc=df[(df['SubSystem'].str.contains('^SirCalc')) | (df['SubSystem'].str.contains('^RTTM')) ][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSIn ...'))
TCsdfLDSIn=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^<-'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
if not dfID.empty:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes1 ...'))
TCsdfLDSRes1=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_SEG_INFO'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 ...'))
TCsdfLDSRes2=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_DRUCK'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
else:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes ...'))
TCsdfLDSRes=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
def __init__(self,logFile=None,zip7File=None,h5File=None,h5FileName=None,readWithDictReader=False,nRows=None,readWindowsLog=False):
"""
(re-)initialize
        logFile:
            is read and stored in the H5 file
            addZip7File(zip7File) reads all logs of a zip file and stores them in the H5 file
        zipFile:
            the 1st logFile is read and stored in the H5 file
            addZip7File(zip7File) reads all logs of a zip file and stores them in the H5 file
            initializing with zipFile is identical to initializing with logFile if logFile is the 1st logFile of the zip
        after addZip7File(zip7File) - repeated for several zips if necessary:
            data can be read with self.get(...) (returns 1 df)
            data can be read with self.getTCs(...) (returns several dfs in TC form)
            data can be read with self.getTCsSpecified(...) (returns 1 df in TC form)
            data in TC form can be extracted to separate H5s with self.extractTCsToH5s(...)
            the TCs can be read back with self.getTCsFromH5s(...)
            === addZip7File(zip7File) - repeated if necessary - and extractTCsToH5s(...) are part of a 7Zip preprocessing step before the actual analysis ===
        h5File:
            the lookUp dfs are read from the H5 file
            the TC-H5 filenames belonging to the H5 file are set
            the TC-H5 files are neither checked for existence nor read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
self.lookUpDf=pd.DataFrame()
self.lookUpDfZips=pd.DataFrame()
try:
if logFile != None and zip7File != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'3 Files (logFile and zip7File and h5File) specified.'))
elif logFile != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and zip7File) specified.'))
elif logFile != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and h5File) specified.'))
elif h5File != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (h5File and zip7File) specified.'))
elif logFile != None:
self.__initlogFile(logFile,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif zip7File != None:
self.__initzip7File(zip7File,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif h5File != None:
self.__initWithH5File(h5File)
else:
logger.debug("{0:s}{1:s}".format(logStr,'No File (logFile XOR zip7File XOR h5File) specified.'))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initlogFile(self,logFile,h5FileName=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with logFile
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn logFile nicht existiert ...
if not os.path.exists(logFile):
logger.debug("{0:s}logFile {1:s} not existing.".format(logStr,logFile))
else:
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
self.__initH5File(logFile,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initH5File(self,h5File,df,h5FileName=None):
"""
creates self.h5File and writes 'init'-Key Logfile df to it
Args:
* h5File: name of logFile or zip7File; the Dir is the Dir of the H5-File
* df
* h5FileName: the H5-FileName without Dir and Extension; if None (default), "Log ab ..." is used
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(h5File)
# H5-File
if h5FileName==None:
h5FileTail="Log ab {0:s}.h5".format(str(df['#LogTime'].min())).replace(':',' ').replace('-',' ')
else:
h5FileTail=h5FileName+'.h5'
self.h5File=os.path.join(h5FileHead,h5FileTail)
# wenn H5 existiert wird es geloescht
if os.path.exists(self.h5File):
os.remove(self.h5File)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileTail))
# init-Logfile schreiben
self.__toH5('init',df)
logger.debug("{0:s}'init'-Key Logfile done.".format(logStr))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initWithH5File(self,h5File,useRawHdfAPI=False):
"""
self.h5File=h5File
self.lookUpDf
self.lookUpDfZips
        the lookUp dfs are read from the H5 file
        the TC-H5 filenames belonging to the H5 file are set if those H5 files exist
        the TC-H5 files are not read
        the CVD filename belonging to the H5 file is set if that H5 file exists
        the H5 file is not read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# H5 existiert
if os.path.exists(h5File):
self.h5File=h5File
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=h5Store['lookUpDf']
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=h5Store['lookUpDfZips']
else:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=pd.read_hdf(self.h5File, key='lookUpDf')
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=pd.read_hdf(self.h5File, key='lookUpDfZips')
else:
logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
#TC-H5s
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
h5FileOPC=name+TCPost+'OPC'+ext
h5FileSirCalc=name+TCPost+'SirCalc'+ext
h5FileLDSIn=name+TCPost+'LDSIn'+ext
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
h5FileLDSRes=name+TCPost+'LDSRes'+ext
if os.path.exists(h5FileOPC):
self.h5FileOPC=h5FileOPC
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileOPC))
if os.path.exists(h5FileSirCalc):
self.h5FileSirCalc=h5FileSirCalc
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileSirCalc))
if os.path.exists(h5FileLDSIn):
self.h5FileLDSIn=h5FileLDSIn
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSIn))
if os.path.exists(h5FileLDSRes):
self.h5FileLDSRes=h5FileLDSRes
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes))
if os.path.exists(h5FileLDSRes1):
self.h5FileLDSRes1=h5FileLDSRes1
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes1))
if os.path.exists(h5FileLDSRes2):
self.h5FileLDSRes2=h5FileLDSRes2
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes2))
h5FileCVD=name+'_'+'CVD'+ext
if os.path.exists(h5FileCVD):
self.h5FileCVD=h5FileCVD
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileCVD))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getInitDf(self,useRawHdfAPI=False):
"""
returns InitDf from H5-File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=pd.DataFrame()
# H5 existiert
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'init' in h5KeysStripped:
df=h5Store['init']
else:
if 'init' in h5KeysStripped:
df=pd.read_hdf(self.h5File, key='init')
else:
logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def __initzip7File(self,zip7File,h5FileName=None,nRows=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with zip7File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
zipFileDirname=os.path.dirname(zip7File)
logger.debug("{0:s}zipFileDirname: {1:s}".format(logStr,zipFileDirname))
aDfRead=False
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,len(allLogFiles)))
logger.debug("{0:s}getnames(): {1:s}.".format(logStr,str(allLogFiles)))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
logger.debug("{0:s}idx: {1:d} logFileNameInZip: {2:s}".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(zipFileDirname,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile) # logFileHead == dirname()
logger.debug("{0:s}idx: {1:d} logFileHead: {2:s} logFileTail: {3:s}".format(logStr,idx,logFileHead,logFileTail))
(name, ext)=os.path.splitext(logFile)
logger.debug("{0:s}idx: {1:d} name: {2:s} ext: {3:s}".format(logStr,idx,name,ext))
if logFileHead!='': # logFileHead == dirname()
if os.path.exists(logFileHead) and logFileHead not in extDirLstExistingLogged:
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert bereits.".format(logStr,idx,logFileHead))
extDirLstExistingLogged.append(logFileHead)
elif not os.path.exists(logFileHead):
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert noch nicht.".format(logStr,idx,logFileHead))
extDirLstTBDeleted.append(logFileHead)
# kein Logfile zu prozessieren ...
if ext == '':
continue
# Logfile prozessieren ...
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
zip7FileObj.extract(path=zipFileDirname,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
else:
aDfRead=True
# ...
# gleich wieder loeschen
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
                        # we only want to read the 1st file ...
                        if aDfRead:
                            break
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
self.__initH5File(zip7File,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __toH5(self,key,df,useRawHdfAPI=False,updLookUpDf=False,logName='',zipName='',noDfStorage=False):
"""
write df with key to H5-File (if not noDfStorage)
Args:
* updLookUpDf: if True, self.lookUpDf is updated with
* zipName (the Zip of logFile)
* logName (the name of the logFile i.e. 20201113_0000004.log)
* FirstTime (the first ScenTime in df)
* LastTime (the last ScenTime in df)
        self.lookUpDf is not written to H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(self.h5File)
if not noDfStorage:
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
try:
h5Store.put(key,df)
except Exception as e:
logger.error("{0:s}Writing df with h5Key={1:s} to {2:s} FAILED!".format(logStr,key,h5FileTail))
raise e
else:
df.to_hdf(self.h5File, key=key)
logger.debug("{0:s}Writing df with h5Key={1:s} to {2:s} done.".format(logStr,key,h5FileTail))
if updLookUpDf:
s=df['ScenTime']#['#LogTime']
FirstTime=s.iloc[0]
LastTime=s.iloc[-1]
if self.lookUpDf.empty:
data={ 'zipName': [zipName]
,'logName': [logName]
,'FirstTime' : [FirstTime]
,'LastTime' : [LastTime]
}
self.lookUpDf = pd.DataFrame (data, columns = ['zipName','logName','FirstTime','LastTime'])
self.lookUpDf['zipName']=self.lookUpDf['zipName'].astype(str)
self.lookUpDf['logName']=self.lookUpDf['logName'].astype(str)
else:
data={ 'zipName': zipName
,'logName': logName
,'FirstTime' : FirstTime
,'LastTime' : LastTime
}
self.lookUpDf=self.lookUpDf.append(data,ignore_index=True)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __processALogFile(self,logFile=None,delimiter='\t',nRows=None,readWithDictReader=False,fValueFct=fValueFct,readWindowsLog=False):
"""
process logFile
Args:
* logFile: logFile to be processed
* nRows: number of logFile rows to be processed; default: None (:= all rows are processed); if readWithDictReader: last row is also processed
* readWithDictReader: if True, csv.DictReader is used; default: None (:= pd.read_csv is used)
Returns:
* df: logFile processed to df
* converted:
* #LogTime: to datetime
* ProcessTime: to datetime
* Value: to float64
* ID,Direction,SubSystem,LogLevel,State,Remark: to str
* new:
* ScenTime datetime
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=None
try:
with open(logFile,'r') as f:
pass
(logFileHead,logFileTail)=os.path.split(logFile)
if readWithDictReader:
restkey='+'
with open(logFile,"r") as csvFile: # 1. Zeile enthaelt die Ueberschrift
reader = csv.DictReader(csvFile,delimiter=delimiter,restkey=restkey)
logger.debug("{0:s}{1:s} csv.DictReader reader processed.".format(logStr,logFileTail))
# If a row has more fields than fieldnames, the remaining data is put in a list and stored with the fieldname specified by restkey.
colNames=reader.fieldnames
dcts = [dct for dct in reader] # alle Zeilen lesen
logger.debug("{0:s}{1:s} csv.DictReader-Ergebnis processed.".format(logStr,logFileTail))
if nRows!=None:
dcts=dcts[0:nRows]+[dcts[-1]]
# nur die Spaltennamen werden als row-Spalten erzeugt
rows = [[dct[colName] for colName in colNames] for dct in dcts]
logger.debug("{0:s}{1:s} rows processed.".format(logStr,logFileTail))
# die "ueberfluessigen" Spalten an die letzte Spalte dranhaengen
for i, dct in enumerate(dcts):
if restkey in dct:
restValue=dct[restkey]
restValueStr = delimiter.join(restValue)
newValue=rows[i][-1]+delimiter+restValueStr
#logger.debug("{0:s}{1:s} restValueStr: {2:s} - Zeile {3:10d}: {4:s} - neuer Wert letzte Spalte: {5:s}.".format(logStr,logFileTail,restValueStr,i,str(rows[i]),newValue))
rows[i][-1]=rows[i][-1]+newValue
logger.debug("{0:s}{1:s} restkey processed.".format(logStr,logFileTail))
index=range(len(rows))
df = pd.DataFrame(rows,columns=colNames,index=index)
else:
if nRows==None:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False)
else:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False,nrows=nRows)
logger.debug("{0:s}{1:s} pd.DataFrame processed.".format(logStr,logFileTail))
#logger.debug("{0:s}df: {1:s}".format(logStr,str(df)))
#LogTime
df['#LogTime']=pd.to_datetime(df['#LogTime'],unit='ms',errors='coerce') # NaT
#ProcessTime
df['ProcessTime']=pd.to_datetime(df['ProcessTime'],unit='ms',errors='coerce') # NaT
logger.debug("{0:s}{1:s} col ProcessTime processed.".format(logStr,logFileTail))
#Value
df['Value']=df.Value.str.replace(',', '.') # Exception: Line: 1137: <class 'AttributeError'>: Can only use .str accessor with string values!
df['Value']=fValueFct(df['Value'].values) # df['ValueProcessed'].apply(fValueFct)
logger.debug("{0:s}{1:s} col Value processed.".format(logStr,logFileTail))
#Strings
for col in ['ID','Direction','SubSystem','LogLevel','State','Remark']:
df[col]=df[col].astype(str)
logger.debug("{0:s}{1:s} String-cols processed.".format(logStr,logFileTail))
#1618249551621 STD CVD 1615442324000 p-p BEGIN_OF_NEW_CONTROL_VOLUME 6-10-SV1-RB~6-10-BID-RB NULL NULL # String in beiden Faellen (Linux und Windows) gleich?
#1618249551621 STD CVD <- 156 CV_ID
##ScenTime
## SubSystem Direction ProcessTime ID Value State Remark
## Linux ---
## 1615029280000 INF SQC Starting cycle for 2021-03-06 12:14:38.000
## 1615029280000 STD LDS MCL 1615029278000 Main cycle loop 06.03.2021 12:14:38.000 (ScenTime: Tag und Zeit in Klartext; Spalte ProcessTime ScenTime!)
## Windows ---
## 1618256150711 STD SQC 1615457121000 Main cycle loop 11:05:21.000 (ScenTime-Zeit in Klartext; Spalte ProcessTime ScenTime!)
dfScenTime=df[df['ID']=='Main cycle loop'][['ProcessTime']]
dfScenTime.rename(columns={'ProcessTime':'ScenTime'},inplace=True)
df=df.join(dfScenTime)
df['ScenTime']=df['ScenTime'].fillna(method='ffill')
df['ScenTime']=df['ScenTime'].fillna(method='bfill')
if df['ScenTime'].isnull().values.all():
logger.debug("{0:s}Keine Zeile mit ID=='Main cycle loop' gefunden. ScenTime zu #LogTime gesetzt.".format(logStr))
df['ScenTime']=df['#LogTime'] # wenn keine Zeile mit ID=='Main cycle loop' gefunden wurde, wird ScenTime zu #LogTime gesetzt
# finalisieren
df=df[['#LogTime','LogLevel','SubSystem','Direction','ProcessTime','ID','Value','ScenTime','State','Remark']]
logger.debug("{0:s}{1:s} processed with nRows: {2:s} (None if all).".format(logStr,logFileTail,str(nRows)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def rebuildLookUpDfZips(self,zip7Files,readWithDictReader=True,readWindowsLog=False):
"""
(re-)initialize with zip7Files
the only persistent outcome is lookUpDfZips (attribute and H5 persistence)
lookUpDf is changed but not stored to H5
(re-)init with AppLog(h5File=...) after using rebuildLookUpDfZips to recover the old lookUpDf
the main usage of rebuildLookUpDfZips is to determine which zip7Files to add, e.g.:
zip7FilesToAdd=lx.lookUpDfZips[~(lx.lookUpDfZips['LastTime']<timeStartAusschnitt) & ~(lx.lookUpDfZips['FirstTime']>timeEndAusschnitt)].index.to_list()
"""
#noDfStorage=False
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#self.__initzip7File(zip7File=zip7Files[0],h5FileName=h5FileName,nRows=1,readWithDictReader=True)
for zip7File in zip7Files:
logger.info("{0:s}addZip7File: {1:s}".format(logStr,zip7File))
self.addZip7File(zip7File,firstsAndLastsLogsOnly=True,nRows=1,readWithDictReader=readWithDictReader,noDfStorage=True,readWindowsLog=readWindowsLog)
logger.debug("{0:s}lookUpDf: {1:s}".format(logStr,self.lookUpDf.to_string()))
df=self.lookUpDf.groupby(by='zipName').agg(['min', 'max'])
logger.debug("{0:s}df: {1:s}".format(logStr,df.to_string()))
minTime=df.loc[:,('FirstTime','min')]
maxTime=df.loc[:,('LastTime','max')]
minFileNr=df.loc[:,('logName','min')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
maxFileNr=df.loc[:,('logName','max')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
s=(maxTime-minTime)/(maxFileNr-minFileNr)
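# s becomes TimespanPerLog: rough average time span covered per log file in this zip, estimated from the
# first/last timestamps and the file-number range (assumes the file numbers grow monotonically with time)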
lookUpDfZips=s.to_frame().rename(columns={0:'TimespanPerLog'})
lookUpDfZips['NumOfFiles']=maxFileNr-minFileNr
lookUpDfZips['FirstTime']=minTime
lookUpDfZips['LastTime']=maxTime
lookUpDfZips['minFileNr']=minFileNr
lookUpDfZips['maxFileNr']=maxFileNr
lookUpDfZips=lookUpDfZips[['FirstTime','LastTime','TimespanPerLog','NumOfFiles','minFileNr','maxFileNr']]
# write lookUpDfZips
self.lookUpDfZips=lookUpDfZips
self.__toH5('lookUpDfZips',self.lookUpDfZips)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def addZip7File(self,zip7File,firstsAndLastsLogsOnly=False,nRows=None,readWithDictReader=False,noDfStorage=False,readWindowsLog=False):
"""
add zip7File
Args:
* zip7File: 7z archive whose LogFiles shall be added
* Args for internal usage:
* firstsAndLastsLogsOnly (then True)
* nRows (then 1)
* readWithDictReader (then True)
i.e. only the first and last logs per zip are read, and of those only the first and the last row, using DictReader
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# if zip7File does not exist ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
logger.debug("{0:s}zip7FileHead (leer wenn zip7 im selben Verz.): {1:s} zip7FileTail: {2:s}.".format(logStr,zip7FileHead,zip7FileTail))
logger.info("{0:s}zip7File: {1:s} ...".format(logStr,zip7File))
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
if firstsAndLastsLogsOnly:
if idx not in [0,1,allLogFilesLen-2,allLogFilesLen-1]:
#logger.debug("{0:s}idx: {1:d} item: {2:s} NOT processed ...".format(logStr,idx,logFileNameInZip))
continue
logger.info("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
# the file that 7Zip will create on extract
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
# logFileNameInZip may denote a directory rather than a file
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
# Verzeichnis!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
# kein Logfile zu prozessieren ...
continue
# logFileNameInZip denotes a file
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
logger.debug("{0:s}Log: {1:s} wird extrahiert ... ".format(logStr,logFileTail))
import lzma
try:
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
except lzma.LZMAError:
logger.warning("{0:s}Log: {1:s} nicht erfolgreich extrahiert - continue ... ".format(logStr,logFileTail))
continue
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}Log: {1:s} wurde extrahiert. ".format(logStr,logFileTail))
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
# delete the extracted file again right away
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
# ...
(name, ext)=os.path.splitext(logFileTail)
key='Log'+name
if zip7FileHead != '':
zipName=os.path.join(os.path.relpath(zip7FileHead),zip7FileTail)
else:
zipName=zip7FileTail
# write df
self.__toH5(key,df,updLookUpDf=True,logName=logFileTail,zipName=zipName,noDfStorage=noDfStorage)#os.path.join(os.path.relpath(zip7FileHead),zip7FileTail))
# then immediately write lookUpDf as well ...
self.__toH5('lookUpDf',self.lookUpDf,noDfStorage=noDfStorage)
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getTotalLogTime(self):
"""
Returns tuple: firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal # gross log time, net log time, sum of all gaps between 2 consecutive log files (should equal gross minus net)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# content of the logs
tdTotal=pd.Timedelta('0 Seconds')
tdBetweenFilesTotal=pd.Timedelta('0 Seconds')
for idx,(index,row) in enumerate(self.lookUpDf.iterrows()):
if idx > 0:
tdBetweenFiles=row["FirstTime"]-lastTime
tdBetweenFilesTotal=tdBetweenFilesTotal+tdBetweenFiles
if tdBetweenFiles > pd.Timedelta('0 second'):
if tdBetweenFiles > pd.Timedelta('1 second'):
logger.info("{:s}Zeitdifferenz: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
pass
if tdBetweenFiles < pd.Timedelta('0 second'):
if tdBetweenFiles < -pd.Timedelta('1 second'):
pass
logger.info("{:s}Zeitueberlappung > 1s: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
td=row["LastTime"]-row["FirstTime"]
if type(td) == pd.Timedelta:
tdTotal=tdTotal+td
else:
print(index)# error: LastTime-FirstTime is not a pd.Timedelta
lastTime=row["LastTime"]
lastFile=row["logName"]
lastZip=row["zipName"]
firstTime=self.lookUpDf.iloc[0]["FirstTime"]
lastTime=self.lookUpDf.iloc[-1]["LastTime"]
tdTotalGross=lastTime-firstTime
tdTotalGross,tdTotal,tdBetweenFilesTotal
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal
def extractTCsToH5s(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill):
"""
extracts TC data (and CVD data) from H5 to separate H5 files (postfixes: _TCxxx.h5 and _CVD.h5)
TCsdfOPCFill: if True, the NULLs in TCsdfOPC are filled; default: False
if timeStart != None: data is appended to existing .h5s; otherwise they are overwritten
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# create _TCxxx.h5 files (OPC, SirCalc, LDSIn, LDSRes1, LDSRes2 (,LDSRes)) and _CVD.h5
# over all dfs in H5 (taking timeStart and timeEnd into account)
# read
# determine the TC subset: 'ID','ProcessTime','ScenTime','SubSystem','Value','Direction'
# rows with 'Value' isnull() are NOT read
# i.e. with a logfile semantic that resets a value via NULL rows, the value would stick at the last non-NULL value in a stop-plot output ...
# ... for now ...
# build subsets: ['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2' (,'TCsdfLDSRes')]
# ... NULLs (NaNs) arise from the pivot with index = time: not every time (the superset) has a value for every ID
# save
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
self.h5FileOPC=name+TCPost+'OPC'+ext
self.h5FileSirCalc=name+TCPost+'SirCalc'+ext
self.h5FileLDSIn=name+TCPost+'LDSIn'+ext
if not dfID.empty:
# Attribute
self.h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
self.h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
# Komplement wird geloescht
h5FileLDSRes=name+TCPost+'LDSRes'+ext
try:
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes):
os.remove(h5FileLDSRes)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes))
del self.h5FileLDSRes
except:
pass
else:
# Attribut
self.h5FileLDSRes=name+TCPost+'LDSRes'+ext
# Komplemente werden geloescht
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
try:
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes1):
os.remove(h5FileLDSRes1)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes1))
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes2):
os.remove(h5FileLDSRes2)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes2))
del self.h5FileLDSRes1
del self.h5FileLDSRes2
except:
pass
self.h5FileCVD=name+'_'+'CVD'+ext
h5Keys,h5KeysPost=self.__getH5Keys(timeStart=timeStart,timeEnd=timeEnd)
h5KeysOPC=['TCsOPC'+x for x in h5KeysPost]
h5KeysSirCalc=['TCsSirCalc'+x for x in h5KeysPost]
h5KeysLDSIn=['TCsLDSIn'+x for x in h5KeysPost]
h5KeysLDSRes1=['TCsLDSRes1'+x for x in h5KeysPost]
h5KeysLDSRes2=['TCsLDSRes2'+x for x in h5KeysPost]
h5KeysLDSRes=['TCsLDSRes'+x for x in h5KeysPost]
h5KeysCVD=['CVDRes'+x for x in h5KeysPost]
h5KeysAll=zip(h5Keys,h5KeysOPC,h5KeysSirCalc,h5KeysLDSIn,h5KeysLDSRes1,h5KeysLDSRes2,h5KeysLDSRes,h5KeysCVD)
for idx,(h5Key,h5KeyOPC,h5KeySirCalc,h5KeyLDSIn,h5KeyLDSRes1,h5KeyLDSRes2,h5KeyLDSRes,h5KeyCVD) in enumerate(h5KeysAll):
#H5-Write-Modus
if idx==0:
if timeStart!=None:
mode='a'
else:
mode='w'
else:
mode='a'
logger.info("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
# CVD -------------------------------------------------------------------------------------------------
dfCVD=df[df['SubSystem']=='CVD']
df=df[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
df['Value']=df['Value'].apply(lambda x: fTCCast(x))
df=df[~(df['Value'].isnull())]
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
logger.debug("{0:s}{1:s}".format(logStr,'Write ...'))
TCsdfOPC.to_hdf(self.h5FileOPC,h5KeyOPC, mode=mode)
TCsdfSirCalc.to_hdf(self.h5FileSirCalc,h5KeySirCalc, mode=mode)
TCsdfLDSIn.to_hdf(self.h5FileLDSIn,h5KeyLDSIn, mode=mode)
if not dfID.empty:
TCsdfLDSRes1.to_hdf(self.h5FileLDSRes1,h5KeyLDSRes1, mode=mode)
TCsdfLDSRes2.to_hdf(self.h5FileLDSRes2,h5KeyLDSRes2, mode=mode)
else:
TCsdfLDSRes.to_hdf(self.h5FileLDSRes,h5KeyLDSRes, mode=mode)
# ---
dfCVD.to_hdf(self.h5FileCVD,h5KeyCVD, mode=mode)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return
def shrinkH5File(self):
"""
the dfs are deleted from the H5 file
extractTCsToH5s ### MUST ### have been run beforehand
after shrinkH5File the actual data is no longer available in the master H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# H5 existiert
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys()) # /Log20201216_0000001
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
for key in h5Keys:
if re.match('(^/Log)',key):
logger.debug("{0:s}key removed: {1:s}".format(logStr,str(key)))
h5Store.remove(key.replace(h5KeySep,''))
else:
logger.debug("{0:s}key NOT removed: {1:s}".format(logStr,str(key)))
with pd.HDFStore(self.h5File) as h5Store:
pass
shrinkCmd="ptrepack --chunkshape=auto --propindexes --complib=blosc "+self.h5File+" "+self.h5File+".Shrinked"
logger.debug("{0:s}shrinkCmd: {1:s}".format(logStr,shrinkCmd))
if os.path.exists(self.h5File+".Shrinked"):
os.remove(self.h5File+".Shrinked")
os.system(shrinkCmd)
os.remove(self.h5File)
os.rename(self.h5File+".Shrinked",self.h5File)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def get(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,useRawHdfAPI=False):
"""
returns one df concatenated from the H5 store (log files preselected via timeStart/timeEnd) with filter_fct applied
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # ends after the start, or the file's end equals the start
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # begins before the end, or the file's start equals the end
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0) # keep only .log entries
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
for h5Key in h5Keys:
logger.debug("{0:s}Get (pd.HDFStore) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=h5Store[h5Key]
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
else:
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
def getFromZips(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,readWithDictReader=False,readWindowsLog=False):
"""
returns df from zips
the data is read from the zips: extract each log, parse it, delete it again
initialization must have been done with AppLog(zip7Files=...) because only then does self.lookUpDfZips exist
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
timeStart=pd.Timestamp(timeStart)
timeEnd=pd.Timestamp(timeEnd)
# zips that need to be processed
dfLookUpZips=self.lookUpDfZips
if timeStart!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['LastTime']>=timeStart] # ends after the start, or the file's end equals the start
if timeEnd!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['FirstTime']<=timeEnd] # begins before the end, or the file's start equals the end
for index, row in dfLookUpZips.iterrows():
zip7File=index
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
dTime=timeStart-row['FirstTime']
nStart = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())
dTime=timeEnd-timeStart
nDelta = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())+1
nEnd=nStart+nDelta
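# nStart/nEnd: estimated index window of the log files inside the zip that covers [timeStart,timeEnd],
# derived from TimespanPerLog; idxEff widens the window by one for every skipped directory entry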
logger.debug("{0:s}zip7File: {1:s}: Start: {2:d}/{3:07d} End: {4:d}/{5:07d}".format(logStr,zip7FileTail
,nStart,nStart+row['minFileNr']
,nStart+nDelta,nStart+row['minFileNr']+nDelta))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
idxEff=0
for idx,logFileNameInZip in enumerate(allLogFiles):
if idx < nStart-idxEff or idx > nEnd+idxEff:
continue
logger.debug("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
# evtl. bezeichnet logFileNameInZip keine Datei sondern ein Verzeichnis
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
# Verzeichnis!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
# kein Logfile zu prozessieren ...
idxEff+=1
continue
# logFileNameInZip bezeichnet eine Datei
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
else:
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
# ...
# gleich wieder loeschen
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
def getTCs(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill,persistent=False,overwrite=True):
"""
returns TC-dfs
dfs are processed as described in extractTCsToH5s; see there
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
TCKeys=['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2a','TCsdfLDSRes2b','TCsdfLDSRes2c']
if persistent:
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
#logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if set(TCKeys) & set(h5KeysStripped) == set(TCKeys):
if not overwrite:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - return aus H5-File ...".format(logStr,str(TCKeys)))
TCsdfOPC=pd.read_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc=pd.read_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn=pd.read_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1=pd.read_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a=pd.read_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b=pd.read_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c=pd.read_hdf(self.h5File,key='TCsdfLDSRes2c')
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - sollen aber ueberschrieben werden ...".format(logStr,str(TCKeys)))
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren nicht (alle) ...".format(logStr,str(TCKeys)))
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # ends after the start, or the file's end equals the start
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # begins before the end, or the file's start equals the end
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0) # keep only .log entries
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
dfLst=[]
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
dfSingle=pd.read_hdf(self.h5File, key=h5Key)
dfSingle=dfSingle[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
dfSingle=dfSingle[~(dfSingle['Value'].isnull())]
dfLst.append(dfSingle)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
df=pd.concat(dfLst)
del dfLst
logger.debug("{0:s}{1:s}".format(logStr,'Concat finished. Filter & Pivot ...'))
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
if persistent:
logger.debug("{0:s}peristent: TCKeys {1:s} nach H5-File ...".format(logStr,str(TCKeys)))
TCsdfOPC.to_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc.to_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn.to_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1.to_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a.to_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b.to_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c.to_hdf(self.h5File,key='TCsdfLDSRes2c')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2#a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
def getTCsFromH5s(self,timeStart=None,timeEnd=None, LDSResOnly=False, LDSResColsSpecified=None, LDSResTypeSpecified=None, timeShiftPair=None):
"""
returns several TC-dfs from TC-H5s:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
LDSResOnly:
TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfLDSRes
LDSResColsSpecified:
return in LDSRes df(s) only the specified cols
all cols are returned otherwise
LDSResTypeSpecified:
return TCsdfLDSRes1 (SEG) for 'SEG' or TCsdfLDSRes2 (Druck) for 'Druck'
both are returned otherwise
timeShiftPair: (period,freq), e.g. (1,'H'); if not None the index is shifted
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
try:
self.h5FileLDSRes1
Res2=True
except:
Res2=False
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
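# Note: the decorator is applied per test method (e.g. test_copy_names below) and only changes
# behaviour when self.panel is a SparsePanel; for all other panel classes it is a pass-through.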
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
| assert_panel_equal(result, expected) | pandas.util.testing.assert_panel_equal |
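# --- Illustrative sketch (not from the original test module) ---
# Minimal use of pandas.util.testing.assert_panel_equal, assuming a legacy pandas
# release (roughly 0.20-0.24) where Panel still exists; the call raises an
# AssertionError when labels, dtypes, or values differ.
import numpy as np
import pandas as pd
import pandas.util.testing as tm_legacy
vals = np.arange(24, dtype='float64').reshape(2, 3, 4)
left = pd.Panel(vals, items=['ItemA', 'ItemB'])
right = pd.Panel(vals.copy(), items=['ItemA', 'ItemB'])
tm_legacy.assert_panel_equal(left, right)  # passes: identical panels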
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Comparisons against an ordinary scalar behave elementwise for float and datetime indexes alike
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = | tm.box_expected(dz, box) | pandas._testing.box_expected |
from IMLearn import BaseEstimator
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
# from IMLearn.utils import split_train_test
import sklearn
import numpy as np
import pandas as pd
def _to_date_number(features: pd.DataFrame, keys: list) -> None:
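# Parses each listed column with pd.to_datetime, then replaces it in place with the
# integer epoch value (Timestamp.value, i.e. nanoseconds since 1970-01-01).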
for field in keys:
features[field] = pd.to_datetime(features[field])
features[field] = features[field].apply(lambda x: x.value)
def _to_day_of_week(features, full_data, keys):
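# Adds a "<field>_dayofweek" column (Monday=0 ... Sunday=6) computed from the raw
# date strings in full_data, leaving the original columns untouched.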
for field in keys:
new_key = field + "_dayofweek"
features[new_key] = pd.to_datetime(full_data[field])
features[new_key] = features[new_key].apply(lambda x: x.dayofweek)
def _add_new_cols(features, full_data):
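# Derived features: day-of-week indicators plus trip length (stay_days) and booking
# lead time (days_till_vacation), both measured in whole days.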
_to_day_of_week(features, full_data, ["checkin_date", "checkout_date", "booking_datetime"])
features['stay_days'] = (pd.to_datetime(full_data['checkout_date'])
- pd.to_datetime(full_data['checkin_date']))
features['stay_days'] = features['stay_days'].apply(lambda x: x.days)
features['days_till_vacation'] = (pd.to_datetime(full_data['checkin_date'])
- | pd.to_datetime(full_data['booking_datetime']) | pandas.to_datetime |
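# --- Illustrative sketch (not from the original feature-engineering code) ---
# pd.to_datetime parses date-like columns; subtracting two parsed columns yields a
# timedelta Series whose .dt.days gives the same whole-day counts computed above
# with .apply(lambda x: x.days). The column values here are made up for the example.
import pandas as pd
bookings = pd.DataFrame({
    "checkin_date": ["2018-07-01", "2018-07-04"],
    "checkout_date": ["2018-07-03", "2018-07-08"],
})
stay = pd.to_datetime(bookings["checkout_date"]) - pd.to_datetime(bookings["checkin_date"])
print(stay.dt.days.tolist())  # [2, 4]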
import pandas as pd
def filtering_ercc_table(ref_feature_count_table, min_row_sum,
filtered_ref_feature_count_table):
ref_feature_count_table_df = pd.read_table(ref_feature_count_table, sep='\t')
table_filtered_ercc = create_a_new_table_filtered_ercc(ref_feature_count_table_df)
colum_with_libraries = _extract_gene_matrix(table_filtered_ercc)
table_filtered_min_row_sum = min_row_sum_ercc_table(table_filtered_ercc, colum_with_libraries,
min_row_sum)
table_filtered_min_row_sum.to_csv(filtered_ref_feature_count_table, sep='\t', index=None)
def _extract_gene_matrix(feature_count_table_df):
gene_column = feature_count_table_df[list(filter(
lambda col: col.startswith("Libraries"), feature_count_table_df.columns))]
return gene_column
def create_a_new_table_filtered_ercc(alignment_table):
series = []
for index, row in alignment_table.iterrows():
selection = row['Libraries']
if selection.startswith('ERCC') and ('No. of aligned reads' in selection):
series.append(row)
filtered_table = pd.DataFrame(series)
return filtered_table
def min_row_sum_ercc_table(table_filtered_ercc, colum_with_libraries, min_row_sum):
gene_table_final = []
summed_values = table_filtered_ercc.sum(axis=1)
combined_df = | pd.concat([colum_with_libraries, summed_values], axis=1) | pandas.concat |
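# --- Illustrative sketch (not from the original script) ---
# pd.concat(..., axis=1) aligns its inputs on the index and places them side by
# side, which is how the "Libraries" column and the per-row sums are combined above.
import pandas as pd
libs = pd.Series(["ERCC-00002", "ERCC-00003"], name="Libraries")
counts = pd.DataFrame({"lib_1": [10, 0], "lib_2": [5, 1]})
combined = pd.concat([libs, counts.sum(axis=1)], axis=1)
print(combined)  # two columns: Libraries and the summed counts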
'''
pyjade
A program to export, curate, and transform data from the MySQL database used by the Jane Addams Digital Edition.
'''
import os
import re
import sys
import json
import string
import datetime
import mysql.connector
from diskcache import Cache
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from tqdm import tqdm
from safeprint import print
'''
Options
'''
try: # Options file setup credit <NAME>
with open(os.path.join('options.json')) as env_file:
ENV = json.loads(env_file.read())
except:
print('"Options.json" not found; please add "options.json" to the current directory.')
'''
SQL Connection
'''
DB = mysql.connector.connect(
host=ENV['SQL']['HOST'],
user=ENV['SQL']['USER'],
passwd=ENV['SQL']['PASSWORD'],
database=ENV['SQL']['DATABASE']
)
CUR = DB.cursor(buffered=True)
'''
Setup
'''
BEGIN = datetime.datetime.now()
TS = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ITEM_ELEMENTS = ENV['ELEMENT_DICTIONARY']['DCTERMS_IN_USE']
ITEM_ELEMENTS.update(ENV['ELEMENT_DICTIONARY']['DESC_JADE_ELEMENTS'])
TYPES = ENV['ELEMENT_DICTIONARY']['TYPES']
OUT_DIR = 'outputs/'
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
DATASET_OPTIONS = ENV['DATASET_OPTIONS']
CRUMBS = DATASET_OPTIONS['EXPORT_SEPARATE_SQL_CRUMBS']
PROP_SET_LIST = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
INCLUDE_PROPS = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
class Dataset():
def __init__(self):
'''
Start building the dataset objects by pulling IDs and types from omek_items
'''
statement = '''
SELECT omek_items.id as item_id, omek_item_types.`name` as 'jade_type', collection_id as 'jade_collection' FROM omek_items
JOIN omek_item_types on omek_items.item_type_id = omek_item_types.id
WHERE public = 1
ORDER BY item_id;
'''
self.omek_items = pd.read_sql(statement,DB)
self.omek_items = self.omek_items.set_index('item_id',drop=False)
self.objects = self.omek_items.copy()
self.objects['item_id'] = self.objects['item_id'].apply(
lambda x: self.convert_to_jade_id(x))
self.objects.rename(columns={'item_id': 'jade_id'},inplace=True)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects = self.objects[self.objects['jade_type'].isin(
['Text','Event','Person','Organization','Publication']
)]
# Noise is an alternate dataset to record property values that don't fit the regular usage
self.noise = self.objects.copy()
self.noise.drop('jade_type',axis=1)
self.noise.drop('jade_collection',axis=1)
def ingest(self,limit=None):
'''
Get the item element texts
'''
statement = f'''
SELECT et.id AS id, et.record_id AS record_id,
et.element_id AS element_id, et.`text` AS el_text,
items.item_type_id AS item_type
FROM omek_element_texts as et
JOIN omek_items AS items ON et.record_id = items.id
WHERE record_type = "Item"
ORDER BY id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.element_texts = pd.read_sql(statement,DB)
# Load environment variables
ELEMENT_IDS = list(ITEM_ELEMENTS.keys())
# Set data structure:
data = {}
noise = {}
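# compile_json (a helper defined elsewhere in this module) appears to append each
# value under data[jade_id][element_label], building nested dicts of lists that are
# unwrapped later via unwrap_list.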
# Iterate through the element_texts
iter = tqdm(self.element_texts.iterrows())
iter.set_description("Ingesting item attributes")
for tup in iter:
row = tup[1]
element_id = str(row.loc['element_id'])
if row.loc['record_id'] in self.omek_items.index.values:
jade_type = self.omek_items.loc[row.loc['record_id'],'jade_type']
jade_id = self.convert_to_jade_id(row.loc['record_id'])
# Filter element texts through environment variables
if element_id in ELEMENT_IDS:
if jade_type in TYPES.values():
element_label = ITEM_ELEMENTS[element_id]
# Filters property values through the sets designated in the options
if element_label in INCLUDE_PROPS[jade_type]:
compile_json(data,jade_id,element_label,row.loc['el_text'])
else:
compile_json(noise,jade_id,element_label,row.loc['el_text'])
# if CRUMBS:
# print('Excluded',element_label,'in type',jade_type)
# Add accumulated data to DataFrame
new_df = pd.DataFrame.from_dict(data,orient='index')
new_noise_df = pd.DataFrame.from_dict(noise,orient='index')
self.objects = pd.concat([self.objects,new_df],axis=1)
self.noise = pd.concat([self.noise,new_noise_df],axis=1)
# Add URLs
base_url = "https://digital.janeaddams.ramapo.edu/items/show/"
self.objects.insert(loc=1,column='jade_url',value=[
base_url+id.split('_')[-1] for id in self.objects.index.values
])
self.add_collections(limit)
self.add_tags(limit)
# Remove records with no title fields found
self.objects = self.objects.dropna(subset=['dcterms_title'])
def convert_to_jade_id(self,item_id):
'''
Prepend the type string to the SQL primary key so that locations and items are unique in the same set of relations
'''
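# e.g. an omek_items id of 1430 with type "Text" becomes "text_1430"; recognized
# records with an unlisted type get an "unspecified_" prefix, and ids missing from
# omek_items get "unpublished_".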
if not isinstance(item_id, str):
if item_id in self.omek_items.index.values:
the_type = self.omek_items.at[item_id,"jade_type"]
if the_type in list(TYPES.values()):
return the_type.lower()+"_"+str(item_id)
else:
return "unspecified_"+str(item_id)
else:
return "unpublished_"+str(item_id)
else:
return item_id
def add_tags(self,limit):
'''
Pull tags from the database
'''
statement = f'''
SELECT * FROM omek_records_tags
JOIN omek_tags on omek_records_tags.tag_id = omek_tags.id;
'''
self.tag_df = pd.read_sql(statement,DB)
self.objects = self.objects[:limit].apply(
lambda x : self.add_tag(x),axis=1)
def add_tag(self, row_ser):
'''
Add the tag to the list for each object
'''
new_subj_field = []
id = row_ser.loc['jade_id']
try:
tag_names = self.tag_df.loc[self.tag_df['record_id'] == int(id.split("_")[-1])]
if not tag_names.empty:
for name in tag_names['name'].to_list():
if name not in new_subj_field:
new_subj_field.append(name)
row_ser['dcterms_subject'] = new_subj_field
return row_ser
except:
return row_ser
def add_collections(self,limit):
'''
Pull collections from the database
'''
statement = '''
SELECT omek_collections.id as collection_id, `text` as collection_name FROM omek_collections
JOIN omek_element_texts AS texts ON omek_collections.id = texts.record_id
WHERE record_type = "Collection"
AND element_id = 50
AND public = 1;
'''
self.collection_df = pd.read_sql(statement,DB)
self.collection_df = self.collection_df.set_index('collection_id')
self.objects = self.objects[:limit].apply(
lambda x : self.add_collection(x),
axis=1
)
def add_collection(self,row_ser):
'''
Add the collection to the list for each object
'''
new_collection_field = []
ids = row_ser.loc['jade_collection']
if not isinstance(ids, list):
ids = [ids]
try:
for coll_id in ids:
matches = self.collection_df.at[coll_id,'collection_name']
if isinstance(matches,np.ndarray):
match_list = matches.tolist()
elif isinstance(matches,str):
match_list = [matches]
else:
print("Unrecognized type of collection",type(matches))
for name in match_list:
if name not in new_collection_field:
new_collection_field.append(name)
row_ser['jade_collection'] = new_collection_field
return row_ser
except:
return row_ser
def add_relations(self,limit=None):
'''
Ingest relation data from SQL
'''
# Read from SQL tables omek_item_relations_relations and omek_item_relations_properties
statement = f'''
SELECT relations.id as id, relations.subject_item_id AS subjId, properties.id as relId, properties.label AS relLabel, relations.object_item_id AS objId
FROM omek_item_relations_relations AS relations
JOIN omek_item_relations_properties AS properties ON relations.property_id = properties.id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.relations = pd.read_sql(statement,DB,index_col='id')
# Style relation labels with camel case
self.relations['relLabel'] = self.relations['relLabel'].apply(
lambda x: camel(x))
# Set up data structure
data = {}
noise = {}
# Add the type prefix to the subject and object IDs
self.relations['subjId'] = self.relations['subjId'].apply(
lambda x: self.convert_to_jade_id(x))
self.relations['objId'] = self.relations['objId'].apply(
lambda x: self.convert_to_jade_id(x))
# Iterate through the relation set
iter = tqdm(self.relations.iterrows())
iter.set_description("Adding relations")
for tup in iter:
row = tup[1]
subjId = row['subjId']
relLabel = row['relLabel']
objId = row['objId']
if (
subjId in self.objects.index.values
) and (
objId in self.objects.index.values
):
# print(subjId,objId)
compile_json(data,subjId,relLabel,objId)
else:
compile_json(noise,subjId,relLabel,objId)
# Add locations to the relations
# This is a thorny call bramble that should probably be untangled in a future iteration of the script
locSet = LocationSet()
locSet.ingest(self,limit=limit)
data, noise = self.add_locations(locSet,data,noise)
# Add the compiled relation data into the main DataFrame and the noise bin
new_df = pd.DataFrame(data={"jade_relation":list(data.values())},index=list(data.keys()))
self.objects = pd.concat([self.objects,new_df],sort=False,axis=1)
new_noise_df = pd.DataFrame(data={"jade_relation":list(noise.values())},index=list(noise.keys()))
self.noise = pd.concat([self.noise,new_noise_df],sort=False,axis=1)
def add_locations(self,locSet,data,noise):
'''
Add locations from class object already constructed
'''
# Add the type prefix to the location and item IDs
locSet.locations['loc_id'] = locSet.locations['loc_id'].astype(str)
locSet.locations['loc_id'] = locSet.locations['loc_id'].apply(
lambda x : "location_" + str(x))
locSet.locations.rename(columns={'loc_id': 'jade_id'},inplace=True)
# Merge locations table into objects table
self.objects = pd.concat([self.objects,locSet.locations],axis=0)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects.index.name = None
dataset_ids = self.objects.index.values
self.location_duplicates = locSet.location_duplicates
# Iterate through the location set
iter = tqdm(locSet.locations.iterrows())
iter.set_description("Adding locations")
for tup in iter:
row = tup[1]
# Iterate through the collection of items for each location
for rel in list(row.loc['loc_relation'].items()):
loc_id = row.loc['jade_id']
desc_list = rel[1]
item_id = rel[0]
for desc in desc_list:
# Build up the data structure for the later DataFrame
if item_id in dataset_ids:
compile_json(data,item_id,desc,loc_id)
else:
compile_json(noise,item_id,desc,loc_id)
# Remove relations from locations table as they are now represented in item rows
self.objects = self.objects.drop("loc_relation",axis=1)
# Add location types
self.objects = self.objects.apply(
lambda ser : self.add_location_types(ser),
axis=1
)
self.noise = self.noise.apply(
lambda ser : self.add_location_types(ser),
axis=1
)
self.objects = self.objects.dropna(subset=['jade_id'])
return data, noise
def add_location_types(self,row):
'''
Look for null type values and adds location if location in jade_id prefix
'''
try:
if pd.isnull(row.loc['jade_type']):
if isinstance(row.loc['jade_id'], str):
if row.loc['jade_id'].split("_")[0] == "location":
row.loc['jade_type'] = "Location"
else:
print("Type null but not location:",row)
else:
print('Dropped type not included:',row['jade_url'])
return row
except:
print("Unknown problem during adding location type for:",row)
def quantify(self):
'''
Run counting functions on properties and relations to create descriptive statistics about the data
'''
self.quant = {}
# Items
self.quant["item_property_count"] = self.objects.count()
# Item properties
self.quantify_properties()
# Item properties by type
self.quantify_properties_by_type()
# Relations (including location relations)
self.quantify_relations()
# Data nesting
self.quant['nesting'] = {}
self.check_nesting(self.objects)
def quantify_properties(self):
'''
Run counts of properties
'''
# Iterate through properties identified for faceting
props = list(DATASET_OPTIONS['SUBSET_PROPERTIES_AND_QUANTITIES'].items())
iter = tqdm(props)
iter.set_description("Quantifying subsets by facet")
for prop, lim in iter:
if prop in self.objects.columns.values:
# Special cases
if prop in ['dcterms_date']:
# Date
dc_dates_ser = self.objects[prop]
dc_dates_ser = dc_dates_ser.apply(unwrap_list)
dc_dates_ser = dc_dates_ser.dropna()
for id in dc_dates_ser.index.values:
try:
date_val = dc_dates_ser[id]
if not isinstance(date_val, list):
date_list = [date_val]
else:
date_list = date_val
for date_string in date_list:
if not isinstance(date_string, str):
date_string = str(date_string)
yearlike = date_string.split('-')[0]
if (
len(yearlike) == 4
) and (
int(yearlike[0]) == 1
) and (
yearlike[3] in '0123456789'
):
year = yearlike
dc_dates_ser[id] = str(year)
                                else:
                                    print('Dropped unrecognized date value:', id, dc_dates_ser[id])
                                    # Series.drop is not in-place; reassign so the value is actually removed
                                    dc_dates_ser = dc_dates_ser.drop(id)
                        except Exception:
                            print('Dropped unrecognized date value:', id)
                            dc_dates_ser = dc_dates_ser.drop(id, errors='ignore')
if len(dc_dates_ser) > 1:
self.add_to_quant(
dc_dates_ser,
sort_on_property_name=False)
# All others / standard structure
else:
ser = self.objects[prop]
ser = ser.dropna()
if len(ser) > 1:
self.add_to_quant(ser)
def add_to_quant(
self,
series, # A named Series object whose index is the item or location IDs
# and whose values are non-empty strings or lists of strings
sort_on_property_name = False # Default False sorts by largest count. Optional True sorts alphabetically by property name
):
'''
Index the DataFrame's IDs by value of passed property (column name)
'''
property = series.name
# Create an index of jade_ids by property value for the series (column) passed
for id in series.index.values:
cell = series[id]
if isinstance(cell, np.ndarray):
cell = cell.tolist()
if not isinstance(cell, list):
cell = [cell]
for val in cell:
compile_json(
self.quant,
property,
val.strip() if isinstance(val, str) else val,
id)
# Create a dictionary of property values and instance counts
for val in list(self.quant[property].keys()):
compile_json(self.quant,
property+"_count",
val,
len(self.quant[property][val]))
# Sort the dictionary and add it to the dataset object
if not sort_on_property_name:
self.quant[property+"_count"] = dict(
sort_by_item_counts(self.quant[property+"_count"]))
self.quant[property+"_count"] = pd.Series(
self.quant[property+"_count"],
index=list(self.quant[property+"_count"].keys()),
name=property+"_count")
if sort_on_property_name:
self.quant[property+"_count"] = self.quant[property+"_count"].sort_index()
# Go ahead and unwrap the single-integer lists created by compile_json
self.quant[property+"_count"] = self.quant[property+"_count"].apply(unwrap_list)
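    # Illustrative note (added comment, not part of the original code): for a 'jade_type'
    # series this builds self.quant['jade_type'] mapping each value (e.g. 'Person', 'Text')
    # to the list of jade_ids carrying it, and self.quant['jade_type_count'] holding the
    # per-value counts, relying on compile_json/sort_by_item_counts defined elsewhere.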
def quantify_properties_by_type(self):
'''
Create a table of property counts by object type
'''
# Get a copy of the main DataFrame and send each row through the counter
self.quant['prop_table'] = {}
df = self.objects.copy()
df = df.apply(
lambda ser : self.compile_types_by_prop(ser),
axis=1
)
# Make the resulting dict a DataFrame, sort it, and abbreviate column headers
self.quant['prop_table'] = pd.DataFrame.from_dict(
self.quant['prop_table'],
orient='index')
self.quant['prop_table'] = self.quant['prop_table'][[
'Person',
'Text',
'Event',
'Organization',
'Publication',
'Location',
'All Types'
]]
self.quant['prop_table'] = self.quant['prop_table'].sort_index()
self.quant['prop_table'].rename(columns={'Organization':'Org.', 'Publication':'Pub.', 'Location':'Loc.'},inplace=True)
def compile_types_by_prop(self,ser):
'''
Count the properties in the passed series by object type
'''
jade_type = ser.loc['jade_type']
jade_type = unwrap_list(jade_type)
if jade_type in list(INCLUDE_PROPS.keys()):
for prop in ser.index.values:
if prop in INCLUDE_PROPS[jade_type]:
cell = ser.loc[prop]
if not isinstance(cell, list):
cell = [cell]
if not pd.isnull(cell).any():
if prop not in self.quant['prop_table']:
self.quant['prop_table'][prop] = {}
if "All Properties" not in self.quant['prop_table']:
self.quant['prop_table']['All Properties'] = {}
if jade_type not in self.quant['prop_table'][prop]:
self.quant['prop_table'][prop][jade_type] = 1
else:
self.quant['prop_table'][prop][jade_type] += 1
if "All Types" not in self.quant['prop_table'][prop]:
self.quant['prop_table'][prop]["All Types"] = 1
else:
self.quant['prop_table'][prop]["All Types"] += 1
if jade_type not in self.quant['prop_table']['All Properties']:
self.quant['prop_table']['All Properties'][jade_type] = 1
else:
self.quant['prop_table']['All Properties'][jade_type] += 1
return ser
def quantify_relations(self):
'''
Make a list of unique relation triples and a table of the most common subject–object pairs
'''
# Iterate through relations in the Dataset
uniq_rels = {}
count_df_index = []
count_df_columns = []
iter = tqdm(self.objects.index.values)
iter.set_description("Counting unique relations")
for subjId in iter:
row = self.objects.loc[subjId]
row_rels_dict = row.loc['jade_relation']
if not | pd.isnull(row_rels_dict) | pandas.isnull |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Train logistic regression for Markov-chain or frequency approach.
"""
import dill
import collections
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn.model_selection import KFold
import stn.deg as deg # noqa
from stn import blockPlanning # noqa
def get_log_reg_list(prof, stn):
"""
Train logistic regression (Markov-chain approach) for each task-mode
combination.
prof: task-mode data generated using lhs.py
stn: stn structure
"""
TP = {}
prods = stn.products
for j in stn.units:
for i in stn.I[j]:
for k in stn.O[j]:
tm = i + "-" + k
TP[j, tm] = get_logreg(prof, tm, j, prods)
for tm in set(["None-None", "M-M"]):
TP[j, tm] = get_logreg(prof, tm, j, prods)
return TP
def get_log_reg_list_freq(prof, stn):
"""
Train logistic regression (Frequency approach) for each task-mode
combination.
prof: task-mode data generated using lhs.py
stn: stn structure
"""
TP = {}
prods = stn.products
for j in stn.units:
for i in stn.I[j]:
for k in stn.O[j]:
tm = i + "-" + k
TP[j, tm] = get_logreg_freq(prof, tm, j, prods)
return TP
def get_logreg(prof, tm, j, prods):
"""
Train logistic regression (Markov-chain approach).
prof: task-mode data generated using lhs.py
tm: task-mode
j: name of unit
prods: list of products
"""
# Filter relevant data
dfj = prof.loc[prof["unit"] == j, ].copy()
dfj["tm"] = [row["task"] + "-" + row["mode"] for i, row in dfj.iterrows()]
dfj["tm-1"] = dfj["tm"].shift(-1)
dfj.loc[ | pd.isna(dfj["tm-1"]) | pandas.isna |
from turtle import TPen, color
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from keras.models import Sequential
from keras.layers import Dense, LSTM, Flatten, Dropout
def get_ace_values(temp_list):
'''
This function lists out all permutations of ace values in the array sum_array
For example, if you have 2 aces, there are 4 permutations:
[[1,1], [1,11], [11,1], [11,11]]
These permutations lead to 3 unique sums: [2, 12, 22]
of these 3, only 2 are <=21 so they are returned: [2, 12]
'''
sum_array = np.zeros((2**len(temp_list), len(temp_list)))
# This loop gets the permutations
for i in range(len(temp_list)):
n = len(temp_list) - i
half_len = int(2**n * 0.5)
        for rep in range(int(sum_array.shape[0]/half_len/2)):  # shape[0] returns the number of rows of the numpy array
sum_array[rep*2**n : rep*2**n+half_len, i] = 1
sum_array[rep*2**n+half_len : rep*2**n+half_len*2, i] = 11
# Only return values that are valid (<=21)
    # return list(set([int(s) for s in np.sum(sum_array, axis=1) if s<=21]))  # return only the ace totals whose sum is <= 21
    return [int(s) for s in np.sum(sum_array, axis=1)]  # return every ace total as an int (duplicates and values over 21 included)
def ace_values(num_aces):
'''
Convert num_aces, an int to a list of lists
For example, if num_aces=2, the output should be [[1,11],[1,11]]
I require this format for the get_ace_values function
'''
temp_list = []
for i in range(num_aces):
temp_list.append([1,11])
return get_ace_values(temp_list)
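# Illustrative note (added comment, not part of the original script): with two aces,
# ace_values(2) builds temp_list = [[1, 11], [1, 11]] and get_ace_values enumerates the
# 2**2 = 4 permutations, so the returned totals are [2, 12, 12, 22] (duplicates and
# busted values are kept by design).
#   assert sorted(ace_values(2)) == [2, 12, 12, 22]  # commented-out sanity check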
def func(x):
'''
    Return 1 if the player's starting hand totals exactly 21 (a natural blackjack), otherwise 0
'''
if x == 21:
return 1
else:
return 0
def make_decks(num_decks, card_types):
'''
    Make a deck -- shuffle together the given number of decks
    input:
        num_decks -> number of decks to use
        card_types -> the card values of a single suit in one deck
    output:
        new_deck -> the shuffled list of card values
'''
new_deck = []
for i in range(num_decks):
        for j in range(4):  # one pass for each suit (spades, hearts, clubs, diamonds)
            new_deck.extend(card_types)  # extend() appends every value of another sequence to the end of the list
random.shuffle(new_deck)
return new_deck
def total_up(hand):
'''
    Total up value of hand
    input:
        <list> hand -> the current hand of cards
    output:
        <int> -> the legal total value of the current hand
    '''
    aces = 0  # number of 'A' cards in the hand
    total = 0  # running sum of the non-'A' cards
for card in hand:
if card != 'A':
total += card
else:
aces += 1
# Call function ace_values to produce list of possible values for aces in hand
ace_value_list = ace_values(aces)
    final_totals = [i+total for i in ace_value_list if i+total<=21]  # an 'A' counts as 1 or 11; keep only totals <= 21 and take the largest -- house rule
if final_totals == []:
return min(ace_value_list) + total
else:
return max(final_totals)
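# Illustrative note (added comment, not part of the original script): total_up([10, 'A'])
# returns 21 (the ace counts as 11), and total_up(['A', 'A', 9]) also returns 21 because
# one ace must drop to 1 to stay at or under 21; when no combination is legal, the
# smallest busted total is returned instead.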
def model_decision_old(model, player_sum, has_ace, dealer_card_num, hit=0, card_count=None):
'''
Given the relevant inputs, the function below uses the neural net to make a prediction
and then based on that prediction, decides whether to hit or stay
    -- feed the player's features to the neural network; if the prediction is greater than 0.52, hit, otherwise stand
    input:
        model -> the model (usually the NN model)
        player_sum -> the player's current hand total
        has_ace -> whether the player's dealt hand contains an 'A'
        dealer_card_num -> the value of the dealer's face-up card
        hit -> whether the player has already hit
        card_count -> the card counter
return:
1 -> hit
0 -> stand
'''
    # Put the data that goes into the neural network into a consistent format
# [[18 0 0 6]]
input_array = np.array([player_sum, hit, has_ace, dealer_card_num]).reshape(1, -1) # 二维数组变成一行 (1, n)
cc_array = pd.DataFrame.from_dict([card_count])
input_array = np.concatenate([input_array, cc_array], axis=1)
    # input_array is passed to the neural network; the prediction is stored in predict_correct
# [[0.10379896]]
predict_correct = model.predict(input_array)
if predict_correct >= 0.52:
return 1
else:
return 0
def model_decision(model, card_count, dealer_card_num):
'''
Given the relevant inputs, the function below uses the neural net to make a prediction
and then based on that prediction, decides whether to hit or stay
    -- feed the player's features to the neural network; if the prediction is greater than 0.52, hit, otherwise stand
    input:
        model -> the model (usually the NN model)
        card_count -> the card counter
        dealer_card_num -> the value of the dealer's face-up card
    return:
        1 -> hit
        0 -> stand
    '''
    # Put the data that goes into the neural network into a consistent format
cc_array_bust = pd.DataFrame.from_dict([card_count])
input_array = np.concatenate([cc_array_bust, np.array(dealer_card_num).reshape(1, -1)], axis=1)
    # input_array is passed to the neural network; the prediction is stored in predict_correct
# [[0.10379896]]
predict_correct = model.predict(input_array)
if predict_correct >= 0.52:
return 1
else:
return 0
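# Illustrative note (added comment, not part of the original script): card_count is expected
# to be a dict with one key per card value, so pd.DataFrame.from_dict([card_count]) is a
# single-row frame and the concatenated input_array has shape (1, len(card_count) + 1) --
# the card counts followed by the dealer's face-up card.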
def create_data(type, dealer_card_feature, player_card_feature, player_results, action_results=None, new_stack=None, games_played=None, card_count_list=None, dealer_bust=None):
'''
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        dealer_card_feature -> the dealer's first card in every game
        player_card_feature -> all of the player's hands in every game
        player_results -> the player's win/loss results
        action_results -> whether the player hit
        new_stack -> whether this is the first game played with the shoe
        games_played -> how many games have been played with this shoe
        card_count_list -> the card counter
        dealer_bust -> whether the dealer busted
    return:
        model_df -> dealer_card: the dealer's face-up card
                    player_total_initial: player one's hand total after the deal
                    Y: player one's "lose"/"push"/"win" result (-1, 0, 1)
                    lose: player one's "lose"/"not lose" result (1, 0)
                    has_ace: whether player one's dealt hand contains an 'A'
                    dealer_card_num: the value of the dealer's face-up card
                    correct_action: whether the decision was the correct one
                    hit?: whether player one hit after the deal
                    new_stack: whether this is the first game played with the shoe
                    games_played_with_stack: how many games have been played with this shoe
                    dealer_bust: whether the dealer busted
                    blackjack?: whether the player's starting hand is 21
                    2 ~ 'A': the card count for this round
'''
model_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import os
import sys
def save_rmse_results_to_csv(rmses, prefix='', rmses_2=None, label_1=None, label_2=None):
mean_rmses_dataframe = | pd.DataFrame() | pandas.DataFrame |
"""
This script trains the RNN model and creates the predictions for each
submission round required by the benchmark.
"""
# import packages
import os
import inspect
import itertools
import argparse
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.contrib.training as training
from rnn_train import rnn_train
from rnn_predict import rnn_predict
from make_features import make_features
import hparams
from utils import *
# Add TSPerf root directory to sys.path
file_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
tsperf_dir = os.path.join(file_dir, "../../../../")
if tsperf_dir not in sys.path:
sys.path.append(tsperf_dir)
import retail_sales.OrangeJuice_Pt_3Weeks_Weekly.common.benchmark_settings as bs
data_relative_dir = "../../data"
def create_round_prediction(
data_dir,
submission_round,
hparams,
make_features_flag=True,
train_model_flag=True,
train_back_offset=0,
predict_cut_mode="predict",
random_seed=1,
):
"""
This function trains the model and creates the predictions for a certain
submission round.
"""
# conduct feature engineering and save related numpy array to disk
if make_features_flag:
make_features(submission_round=submission_round)
# read the numpy arrays output from the make_features.py
# file_dir = './prototypes/retail_rnn_model'
intermediate_data_dir = os.path.join(data_dir, "intermediate/round_{}".format(submission_round))
ts_value_train = np.load(os.path.join(intermediate_data_dir, "ts_value_train.npy"))
feature_train = np.load(os.path.join(intermediate_data_dir, "feature_train.npy"))
feature_test = np.load(os.path.join(intermediate_data_dir, "feature_test.npy"))
# convert the dtype to float32 to suffice tensorflow cudnn_rnn requirements.
ts_value_train = ts_value_train.astype(dtype="float32")
feature_train = feature_train.astype(dtype="float32")
feature_test = feature_test.astype(dtype="float32")
# define parameters
# constant
predict_window = feature_test.shape[1]
# train the rnn model
if train_model_flag:
tf.reset_default_graph()
tf.set_random_seed(seed=random_seed)
train_error = rnn_train(
ts_value_train,
feature_train,
feature_test,
hparams,
predict_window,
intermediate_data_dir,
submission_round,
back_offset=train_back_offset,
)
# make prediction
tf.reset_default_graph()
pred_batch_size = 1024
pred_o = rnn_predict(
ts_value_train,
feature_train,
feature_test,
hparams,
predict_window,
intermediate_data_dir,
submission_round,
pred_batch_size,
cut_mode=predict_cut_mode,
)
return pred_o, train_error
def create_round_submission(
data_dir,
submission_round,
hparams,
make_features_flag=True,
train_model_flag=True,
train_back_offset=0,
predict_cut_mode="predict",
random_seed=1,
):
"""
This function trains the model and creates the submission in pandas
DataFrame for a certain submission round.
"""
pred_o, _ = create_round_prediction(
data_dir,
submission_round,
hparams,
make_features_flag=make_features_flag,
train_model_flag=train_model_flag,
train_back_offset=train_back_offset,
predict_cut_mode=predict_cut_mode,
random_seed=random_seed,
)
# get rid of prediction at horizon 1
pred_sub = pred_o[:, 1:].reshape((-1))
# arrange the predictions into pd.DataFrame
# read in the test_file for this round
train_file = os.path.join(data_dir, "train/train_round_{}.csv".format(submission_round))
test_file = os.path.join(data_dir, "train/aux_round_{}.csv".format(submission_round))
train = pd.read_csv(train_file, index_col=False)
test = pd.read_csv(test_file, index_col=False)
train_last_week = bs.TRAIN_END_WEEK_LIST[submission_round - 1]
store_list = train["store"].unique()
brand_list = train["brand"].unique()
test_week_list = range(
bs.TEST_START_WEEK_LIST[submission_round - 1], bs.TEST_END_WEEK_LIST[submission_round - 1] + 1
)
test_item_list = list(itertools.product(store_list, brand_list, test_week_list))
test_item_df = pd.DataFrame.from_records(test_item_list, columns=["store", "brand", "week"])
test = test_item_df.merge(test, how="left", on=["store", "brand", "week"])
submission = test.sort_values(by=["store", "brand", "week"], ascending=True)
submission["round"] = submission_round
submission["weeks_ahead"] = submission["week"] - train_last_week
submission["prediction"] = pred_sub
submission = submission[["round", "store", "brand", "week", "weeks_ahead", "prediction"]]
return submission
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, dest="seed", default=1, help="random seed")
args = parser.parse_args()
random_seed = args.seed
# set the data directory
file_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
data_dir = os.path.join(file_dir, data_relative_dir)
# import hyper parameters
    # TODO: add EMA in the code to improve the performance
hparams_dict = hparams.hparams_smac
hparams = training.HParams(**hparams_dict)
num_round = len(bs.TEST_END_WEEK_LIST)
pred_all = | pd.DataFrame() | pandas.DataFrame |
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
import pandas.core.ops as ops
# Basic test for the arithmetic array ops
# -----------------------------------------------------------------------------
@pytest.mark.parametrize(
"opname, exp",
[("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])],
ids=["add", "mul"],
)
def test_add_mul(dtype, opname, exp):
a = pd.array([0, 1, None, 3, 4], dtype=dtype)
b = pd.array([1, 2, 3, None, 5], dtype=dtype)
# array / array
expected = pd.array(exp, dtype=dtype)
op = getattr(operator, opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
op = getattr(ops, "r" + opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_sub(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a - b
expected = pd.array([1, 1, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_div(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a / b
expected = pd.array([np.inf, 2, None, None, 1.25], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398, GH#22793
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = FloatingArray(
np.array([np.nan, np.inf, -np.inf, 1], dtype="float64"),
np.array([False, False, False, True]),
)
if negative:
expected *= -1
tm.assert_extension_array_equal(result, expected)
def test_floordiv(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a // b
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
expected = pd.array([0, 2, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_mod(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a % b
expected = pd.array([0, 0, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_pow_scalar():
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a**0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**np.nan
expected = FloatingArray(
np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"),
np.array([False, False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0**a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1**a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA**a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan**a
expected = FloatingArray(
np.array([1, np.nan, np.nan, np.nan], dtype="float64"),
np.array([False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
def test_pow_array():
a = pd.array([0, 0, 0, 1, 1, 1, None, None, None])
b = pd.array([0, 1, None, 0, 1, None, 0, 1, None])
result = a**b
expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na():
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = pd.array([np.nan, np.nan], dtype="Int64")
result = np.array([1.0, 2.0]) ** arr
expected = pd.array([1.0, np.nan], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("other", [0, 0.5])
def test_numpy_zero_dim_ndarray(other):
arr = pd.array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
# Test generic characteristics / errors
# -----------------------------------------------------------------------------
def test_error_invalid_values(data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
# invalid scalars
msg = "|".join(
[
r"can only perform ops with numeric values",
r"IntegerArray cannot perform the operation mod",
r"unsupported operand type",
r"can only concatenate str \(not \"int\"\) to str",
"not all arguments converted during string",
"ufunc '.*' not supported for the input types, and the inputs could not",
"ufunc '.*' did not contain a loop with signature matching types",
"Addition/subtraction of integers and integer-arrays with Timestamp",
]
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
str_ser = pd.Series("foo", index=s.index)
# with pytest.raises(TypeError, match=msg):
if all_arithmetic_operators in [
"__mul__",
"__rmul__",
]: # (data[~data.isna()] >= 0).all():
res = ops(str_ser)
expected = pd.Series(["foo" * x for x in data], index=s.index)
tm.assert_series_equal(res, expected)
else:
with pytest.raises(TypeError, match=msg):
ops(str_ser)
msg = "|".join(
[
"can only perform ops with numeric values",
"cannot perform .* with this index type: DatetimeArray",
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *",
"unsupported operand type",
r"can only concatenate str \(not \"int\"\) to str",
"not all arguments converted during string",
"cannot subtract DatetimeArray from ndarray",
]
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# Various
# -----------------------------------------------------------------------------
# TODO test unsigned overflow
def test_arith_coerce_scalar(data, all_arithmetic_operators):
op = tm.get_op_from_name(all_arithmetic_operators)
s = pd.Series(data)
other = 0.01
result = op(s, other)
expected = op(s.astype(float), other)
expected = expected.astype("Float64")
# rmod results in NaN that wasn't NA in original nullable Series -> unmask it
if all_arithmetic_operators == "__rmod__":
mask = (s == 0).fillna(False).to_numpy(bool)
expected.array._mask[mask] = False
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = tm.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype == "Float64"
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B": pd.Series([1, np.nan, 3], dtype="UInt8"),
"C": [1, 2, 3],
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = pd.Series([2, np.nan, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["mean"])
def test_reduce_to_float(op):
# some reduce ops always return float, even if the result
# is a rounded number
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": pd.array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, float)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Float64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"source, neg_target, abs_target",
[
([1, 2, 3], [-1, -2, -3], [1, 2, 3]),
([1, 2, None], [-1, -2, None], [1, 2, None]),
([-1, 0, 1], [1, 0, -1], [1, 0, 1]),
],
)
def test_unary_int_operators(any_signed_int_ea_dtype, source, neg_target, abs_target):
dtype = any_signed_int_ea_dtype
arr = pd.array(source, dtype=dtype)
neg_result, pos_result, abs_result = -arr, +arr, abs(arr)
neg_target = pd.array(neg_target, dtype=dtype)
abs_target = pd.array(abs_target, dtype=dtype)
tm.assert_extension_array_equal(neg_result, neg_target)
tm.assert_extension_array_equal(pos_result, arr)
assert not tm.shares_memory(pos_result, arr)
tm.assert_extension_array_equal(abs_result, abs_target)
def test_values_multiplying_large_series_by_NA():
# GH#33701
result = pd.NA * pd.Series(np.zeros(10001))
expected = pd.Series([pd.NA] * 10001)
tm.assert_series_equal(result, expected)
def test_bitwise(dtype):
left = | pd.array([1, None, 3, 4], dtype=dtype) | pandas.array |
from analysis import detect_video, filter_results
import pandas as pd
d = detect_video('testvids/ccc01_sparrot_major.mp4', 'test_out.avi')
d = filter_results(d)
print(d)
| pd.DataFrame(d) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 8 02:45:30 2020
@author: rajee
"""
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from hotelling.plots import control_chart, control_stats, univariate_control_chart
from hotelling.stats import hotelling_t2
from sklearn.svm import OneClassSVM
from KPCA import KPCA_M
from LSTM_model import model_forecast
from model import model_LSTM_CNN
def plot_series(time, series, label, format="-", start=0, end=None):
plt.plot(time[start:end], series[start:end], format, label = label)
plt.xlabel("Time")
plt.ylabel("Value")
plt.grid(True)
dataset = | pd.read_csv('IDV4.csv') | pandas.read_csv |
# Copyright 2021 <NAME>
# This program is licensed under the MIT license. See the License.txt file for details
# noinspection PyUnresolvedReferences
from typing import List, Set, Dict, Tuple, Optional, Union
import gpxpy
import os
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import seaborn as sns
import pandas
from datetime import datetime, timezone
import math
import argparse
import numpy as np
import time
from lxml import etree
import pytz
import tzlocal
# Global variable controls whether some timing information is "printed" to the console or output
# window. This variable can be changed with the --print_debugging command line option.
# You can also temporarily override it if necessary when testing.
print_debugging: bool = False
def meters_to_user_units_string(meters: float, units: str) -> str:
"""Convert a meters value to user units, and format as a string"""
if units == 'english':
return format(meters * 3.2808, '0.1f')
else:
return format(meters, '0.2f')
def meters_to_user_units(meters: float, units: str) -> float:
"""Convert a meters value to user units"""
if units == 'english':
return meters * 3.2808
else:
return meters
def user_units_to_meters(value: float, units: str) -> float:
"""Convert a user units value to meters"""
if units == 'english':
return value / 3.2808
else:
return value
def meters_to_user_units_scale_factor(units: str) -> float:
"""
Return the scale factor to convert meters to user units.
Multiply the meters value by the scale factor to get user units
Args:
units: String containing 'english' or 'meters'
Returns: Scale factor
"""
if units == 'english':
return 3.2808
else:
return 1.0
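# Illustrative note (added comment, not part of the original script): with units='english',
# meters_to_user_units_string(100.0, 'english') returns '328.1' (100 m * 3.2808 formatted to
# one decimal place), while the metric path keeps two decimals, e.g. '100.00'.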
def ceg_segment(segment: gpxpy.gpx.GPXTrackSegment, threshold: float, direction: int) -> Optional[float]:
"""
Returns the CEG (cumulative elevation gain) or CEL (cumulative elevation loss) of the GPXTrackPoints
in a GPXTrackSegment.
Args:
segment: The GPXTrackSegment to analyze
threshold: The threshold distance (in meters)
direction: The direction to analyze. Use +1 for CEG and -1 for CEL.
Returns:
The CEG (or CEL), or None if segment does not have elevations (less than 75% of points have
associated elevations).
"""
if not segment.has_elevations:
return None
previous_good: Optional[float] = None
elevation_sum: float = 0.0
elevation_gain: float = 0.0
for point in segment.points:
if previous_good is None:
previous_good = point.elevation
elif point.elevation is not None:
elevation_gain = point.elevation - previous_good
if abs(elevation_gain) >= threshold:
if (((direction >= 0) and (elevation_gain > 0)) or
((direction < 0) and (elevation_gain < 0))):
elevation_sum += elevation_gain
elevation_gain = 0
previous_good = point.elevation
# The last numeric cell of the range is always considered a "good" data point.
# Add it now. If the last point was already considered good and added,
# elevation_gain will be zero.
if (((direction >= 0) and (elevation_gain > 0)) or
((direction < 0) and (elevation_gain < 0))):
elevation_sum += elevation_gain
return elevation_sum
def ceg_track(track: gpxpy.gpx.GPXTrack, threshold: float, direction: int) -> Optional[float]:
"""
Returns the CEG (cumulative elevation gain) or CEL (cumulative elevation loss) of the GPXTrackSegments
in a GPXTrack.
Does not count gains and losses that are less than the threshold.
Parameters:
track: The GPXTrack to analyze
threshold: The threshold distance (in meters)
direction: The direction to analyze. Use +1 for CEG and -1 for CEL.
"""
if not track.has_elevations:
return None
elevation_sum: float = 0
for segment in track.segments:
elevation_sum += ceg_segment(segment, threshold, direction) or 0.0
return elevation_sum
def ceg(gpx: gpxpy.gpx.GPX, threshold: float, direction: int) -> Optional[float]:
"""
Returns the CEG (cumulative elevation gain) or CEL (cumulative elevation loss) of the GPXTrackSegments
in a GPXTrack.
Does not count gains and losses that are less than the threshold.
Parameters:
gpx: The GPX object to analyze
threshold: The threshold distance (in meters)
direction: The direction to analyze. Use +1 for CEG and -1 for CEL.
"""
if not gpx.has_elevations:
return None
elevation_sum: float = 0.0
for track in gpx.tracks:
elevation_sum += ceg_track(track, threshold, direction) or 0.0
return elevation_sum
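# Illustrative note (added comment, not part of the original script): for a segment whose
# elevations are [100, 101, 100.5, 103] and a 1.0 m threshold, ceg_segment adds +1 (100->101),
# ignores the -0.5 dip (below threshold), then adds +2 (101->103), so CEG = 3.0; with
# direction=-1 the CEL is 0.0 because no single drop reaches the threshold.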
def filter_elevation2(gpx: gpxpy.gpx, kernel: tuple) -> None:
size = len(kernel)
if size % 2 == 0:
raise RuntimeError('Elevation filter kernel size is not odd number')
# Convolve the segment elevations with the filter kernel
# For points that are closer to the end than the half-size, don't
# do any convolution
# Todo: Use part of the kernel for points near the end-point
kernel_sum: float = sum(kernel)
if kernel_sum == 0.0:
raise RuntimeError('Elevation filter kernel sum is zero')
half_size = (size - 1) // 2
for track in gpx.tracks:
for segment in track.segments:
new_elevations: List[float] = []
for i in range(len(segment.points)):
if ((i - half_size) < 0) or ((i + half_size) >= len(segment.points)):
new_elevations.append(segment.points[i].elevation)
else:
weighted_sum: Optional[float] = 0.0
for j in range(size):
if segment.points[i - half_size + j].elevation is None:
weighted_sum = None
break
weighted_sum += kernel[j] * segment.points[i - half_size + j].elevation
if weighted_sum is not None:
new_elevations.append(weighted_sum / kernel_sum)
else:
new_elevations.append(segment.points[i].elevation)
for i in range(len(segment.points)):
segment.points[i].elevation = new_elevations[i]
def filter_elevation(gpx: gpxpy.gpx, filter_method: str) -> None:
filter_id = filter_method[0:1].lower()
if filter_id == '0':
return
if filter_id == 'a':
filter_elevation2(gpx, (1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0))
elif filter_id == 'b':
filter_elevation2(gpx, (1.0 / 5.0, 1.0 / 5.0, 1.0 / 5.0, 1.0 / 5.0, 1.0 / 5.0))
elif filter_id == 'c':
filter_elevation2(gpx, (1.0 / 7.0, 1.0 / 7.0, 1.0 / 7.0, 1.0 / 7.0, 1.0 / 7.0, 1.0 / 7.0, 1.0 / 7.0))
elif filter_id == 'd':
filter_elevation2(gpx, (0.3, 0.4, 0.3))
# elif filter_id == 'e':
# filter_elevation2(gpx, (0.4, 0.2, 0.4))
# elif filter_id == 'f':
# gpx.smooth()
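# Illustrative note (added comment, not part of the original script): kernel 'd' = (0.3, 0.4, 0.3)
# replaces each interior elevation e[i] with 0.3*e[i-1] + 0.4*e[i] + 0.3*e[i+1] (the kernel sum
# is 1.0, so no rescaling occurs); points within half the kernel width of either end, or windows
# containing a missing elevation, are left unchanged.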
def write_gpx_file(gpx: gpxpy.gpx, input_filename: str, suffix: str) -> None:
if not suffix or not suffix.strip():
return
# Add the suffix to each track name
for track in gpx.tracks:
if track.name is None or len(track.name) == 0:
track.name = os.path.splitext(os.path.basename(input_filename))[0]
track.name += suffix
# Add the suffix to the filename.
root, ext = os.path.splitext(input_filename)
output_filename = root + suffix + ext
# Fix up xml to be basecamp compatible.
# Garmin basecamp has a bug that gives an "unknown" error when trying to open a file that has a
    # <bounds ...> start-tag with a </bounds> end-tag. However basecamp works fine with an empty-element
# tag <bounds ... />.
    # Here I convert the start-tag and end-tag form (<bounds... ></bounds>) to an empty-element tag form
# (<bounds.../>).
xml = gpx.to_xml()
pos1 = xml.find('<bounds ')
if pos1 >= 0:
pos2 = xml.find('>', pos1)
if pos2 > pos1 and xml[pos1 - 1:pos1] != '/':
pos3 = xml.find('</bounds>', pos2)
if pos3 > pos2:
xml = xml[:pos2] + '/>' + xml[pos3 + 9:]
# Write the file.
with (open(output_filename, 'w')) as output_file:
output_file.write(xml)
def segment_to_elevation_list(segment: gpxpy.gpx.GPXTrackSegment) -> List[Optional[float]]:
"""Converts the elevation values in a segment to a list.
Args:
segment: The track segment to convert
Returns:
A list of Optional[Float] values which are the elevations.
"""
# Create a list of data. Each element is an elevation.
data = []
for point in segment.points:
data.append(point.elevation)
return data
def segment_to_time_list(segment: gpxpy.gpx.GPXTrackSegment) -> List[datetime]:
"""Converts the time values in a segment to a list.
Args:
segment: The track segment to convert
Returns:
A list of times, with the timezones set to UTC.
"""
# Create a list of data. Each element is a datetime object. The datetime object in the
# segment.points[i].time has a timezone of SimpleTZ, which isn't hashable and causes any
# conversion to a pandas dataframe to fail. So we first convert to a list and fix the timezone
# to be UTC.
data = []
for point in segment.points:
data.append(point.time.replace(tzinfo=timezone.utc))
return data
def segment_to_time_segment_legend_list(segment: gpxpy.gpx.GPXTrackSegment, legend: str) -> \
List[List[Union[datetime, Optional[float], str]]]:
"""Converts the time and elevation values in a segment to a list.
Args:
segment: The track segment to convert
legend: String that will become the identifier (final value in return values)
Returns:
        A list of [time, elevation, legend] entries, with the timezones set to UTC.
"""
# Create a list of data. Each element is a datetime object. The datetime object in the
# segment.points[i].time has a timezone of SimpleTZ, which isn't hashable and causes any
# conversion to a pandas dataframe to fail. So we first convert to a list and fix the timezone
# to be UTC.
data = []
for point in segment.points:
data.append([point.time.replace(tzinfo=timezone.utc), point.elevation, legend])
return data
def get_timezone(args: argparse.Namespace) -> pytz.BaseTzInfo:
timezone_name = args.timezone
try:
if timezone_name == 'Local':
return tzlocal.get_localzone()
else:
return pytz.timezone(timezone_name)
except BaseException:
print(f'Invalid timezone name: {timezone_name}')
return pytz.timezone('utc')
def plot_elevations(input_gpx: gpxpy.gpx, input_filename: str, pre_difference_gpx: gpxpy.gpx,
difference_gpx: gpxpy.gpx, output_gpx: gpxpy.gpx,
args: argparse.Namespace, is_interactive: bool) -> None:
"""
Plot the elevations from a gpx object.
There will be a separate plot for each segment in each track.
Args:
input_gpx: The input gpx object
input_filename: The filename of the input file. Used to rename the plot file
pre_difference_gpx: The gpx object that corresponds to the data before subtracting the difference file. This
is the data after filtering.
difference_gpx: The gpx object that corresponds to the difference file. Can be None
output_gpx: The gpx object after filtering and subtracting the difference
args: The args parse structure that gives the arguments passed to the command line or
set in the GUI. The values used are:
show_plot: True to show interactive plot
plot_suffix: Suffix and extension to add to base filename for plot file. If empty
string, no plot file.
plot_input: Add input elevations to plot
plot_before_difference: Add before-difference elevations to the plot
plot_difference_file: Add difference file elevations to plot
plot_output Add output elevations (after filter and difference) to the plot
units: The units string, either 'english' or 'metric'
is_interactive: True if run under gui, false if run from command line.
            If false, and show_plot is true, plots block further processing
so you can view and interact with the plot.
Returns: None
"""
# You must be either showing a plot or saving a plot file.
if not args.show_plot and not args.plot_suffix:
return
# You must turn on one of the plot options
if not args.plot_input and not args.plot_before_difference and not args.plot_difference_file and \
not args.plot_output:
return
for track_idx in range(len(input_gpx.tracks)):
for segment_idx in range(len(input_gpx.tracks[track_idx].segments)):
# Create the data list. This is a long format data structure (in pandas sense of long vs wide).
data = []
num_points = len(input_gpx.tracks[track_idx].segments[segment_idx].points)
# plot the input if requested
if args.plot_input:
data.extend(segment_to_time_segment_legend_list(input_gpx.tracks[track_idx].segments[segment_idx],
'input'))
# plot the pre-difference if requested.
if args.plot_before_difference \
and pre_difference_gpx is not None \
and track_idx < len(pre_difference_gpx.tracks) \
and segment_idx < len(pre_difference_gpx.tracks[track_idx].segments) \
and len(pre_difference_gpx.tracks[track_idx].segments[segment_idx].points) == num_points:
data.extend(segment_to_time_segment_legend_list(
pre_difference_gpx.tracks[track_idx].segments[segment_idx], 'before diff'))
# plot the difference file if requested.
if args.plot_difference_file \
and difference_gpx is not None \
and track_idx < len(difference_gpx.tracks) \
and segment_idx < len(difference_gpx.tracks[track_idx].segments):
data.extend(segment_to_time_segment_legend_list(
difference_gpx.tracks[track_idx].segments[segment_idx], 'diff file'))
# plot the output if requested.
if args.plot_output \
and output_gpx is not None \
and track_idx < len(output_gpx.tracks) \
and segment_idx < len(output_gpx.tracks[track_idx].segments) \
and len(output_gpx.tracks[track_idx].segments[segment_idx].points) == num_points:
data.extend(segment_to_time_segment_legend_list(output_gpx.tracks[track_idx].segments[segment_idx],
'output'))
# Create the dataframe.
sensor_df = pandas.DataFrame(data, columns=['time', 'elevation', 'legend'])
# sensor_df.info(verbose=True)
# print(sensor_df)
# Convert to user units
sensor_df['elevation'] *= meters_to_user_units_scale_factor(args.units)
# Plot the dataFrame.
fig, axes = plt.subplots()
sns.lineplot(data=sensor_df, x='time', y='elevation', hue='legend')
# Set the axis labels.
# Set the x axis to show HH:MM
plt.xlabel('Time')
plt.ylabel(f'Elevation ({"feet" if args.units == "english" else "meters"})')
plt.xticks(rotation=30)
time_format = mdates.DateFormatter('%H:%M', tz=get_timezone(args))
axes.xaxis.set_major_formatter(time_format)
# Put the legend at the bottom of the chart, and make it horizontal.
plt.legend(ncol=5, loc="lower center", bbox_to_anchor=(0.5, -0.3))
# tight_layout rearranges everything so it fits
plt.tight_layout()
# Save the file if requested
if args.plot_suffix:
# Add the suffix to the filename and save the file.
root, ext = os.path.splitext(input_filename)
plot_filename: str = root + args.plot_suffix
if plot_filename == input_filename:
print('Plot file cannot overwrite input file. Plot file not written')
else:
plt.savefig(plot_filename, dpi=200)
# Show the plot if requested
if args.show_plot:
plt.show(block=not is_interactive)
def plot_temperature(output_gpx: gpxpy.gpx, args: argparse.Namespace, is_interactive: bool) -> None:
if not args.merge_temperature \
or not args.plot_temperature \
or output_gpx is None:
return
for track_idx in range(len(output_gpx.tracks)):
for segment_idx in range(len(output_gpx.tracks[track_idx].segments)):
# Create the data list. This is a long format data structure (in pandas sense of long vs wide).
data = []
for point in output_gpx.tracks[track_idx].segments[segment_idx].points:
for ext in point.extensions:
for ext_child in ext:
if ext_child.tag[-5:] == 'atemp':
try:
temperature = float(ext_child.text)
if args.temperature_units == 'F':
temperature = temperature * 9.0 / 5.0 + 32
except (ValueError, OverflowError):
temperature = 0.0
data.append([point.time.replace(tzinfo=timezone.utc), temperature, 'temperature'])
if len(data) == 0:
return
# Create the dataframe.
sensor_df = pandas.DataFrame(data, columns=['time', 'temperature', 'legend'])
# Plot the dataFrame.
fig, axes = plt.subplots()
sns.lineplot(data=sensor_df, x='time', y='temperature', hue='legend')
# Set the axis labels.
# Set the x axis to show HH:MM
plt.xlabel('Time')
plt.ylabel(f'Temperature (\u00b0{args.temperature_units})')
plt.xticks(rotation=30)
time_format = mdates.DateFormatter('%H:%M', tz=get_timezone(args))
axes.xaxis.set_major_formatter(time_format)
# Put the legend at the bottom of the chart, and make it horizontal.
plt.legend(ncol=5, loc="lower center", bbox_to_anchor=(0.5, -0.3))
# tight_layout rearranges everything so it fits
plt.tight_layout()
# Show the plot
plt.show(block=not is_interactive)
def print_stats(gpx: gpxpy.gpx, args: argparse.Namespace, is_interactive: bool) -> None:
for track in gpx.tracks:
print(f'Track: {track.name}')
# if args.gpxpy_up_down:
# uphill, downhill = track.get_uphill_downhill()
# print(f' gpxpy uphill = {meters_to_user_units_string(uphill, args.units)}'
# f' downhill = {meters_to_user_units_string(downhill, args.units)}')
        ceg_list: List[List[float]] = []
if args.ceg_threshold >= 0:
if not args.all_ceg:
threshold = float(args.ceg_threshold)
threshold_meters = user_units_to_meters(float(args.ceg_threshold), args.units)
print(f' CEG({threshold}) = '
f'{meters_to_user_units_string(ceg_track(track, threshold_meters, 1), args.units)}'
f' CEL({threshold}) = '
f'{meters_to_user_units_string(ceg_track(track, threshold_meters, -1), args.units)}'
)
else:
start = 0
stop = math.ceil(float(args.ceg_threshold)) + 1
for threshold in range(start, stop):
threshold_meters = user_units_to_meters(float(threshold), args.units)
ceg_meters = ceg_track(track, threshold_meters, 1)
cel_meters = ceg_track(track, threshold_meters, -1)
ceg_list.append([threshold, meters_to_user_units(ceg_meters, args.units)])
print(f' CEG({threshold}) = {meters_to_user_units_string(ceg_meters, args.units)}'
f' CEL({threshold}) = {meters_to_user_units_string(cel_meters, args.units)}'
)
if args.plot_ceg and len(ceg_list) > 1:
# Create the dataframe.
sensor_df = pandas.DataFrame(ceg_list, columns=['threshold', 'CEG'])
# sensor_df.info(verbose=True)
# print(sensor_df)
fig, ax = plt.subplots()
sns.lineplot(data=sensor_df, x='threshold', y='CEG', ax=ax)
# Set the axis labels.
plt.xlabel(f'Threshold ({"feet" if args.units == "english" else "meters"})')
plt.ylabel(f'CEG ({"feet" if args.units == "english" else "meters"})')
plt.title('Cumulative Elevation Gain')
# tight_layout rearranges everything so it fits
plt.tight_layout()
# Show the plot if requested
plt.show(block=not is_interactive)
# if args.gpxpy_up_down or (args.ceg_threshold >= 0):
if args.ceg_threshold >= 0:
min_elevation, max_elevation = track.get_elevation_extremes()
if min_elevation is None:
min_elevation = 0.0
if max_elevation is None:
max_elevation = 0.0
print(f' Min: {meters_to_user_units_string(min_elevation, args.units)}'
f' Max: {meters_to_user_units_string(max_elevation, args.units)}'
f' Max-Min: {meters_to_user_units_string(max_elevation - min_elevation, args.units)}')
def subtract_difference(gpx: gpxpy.gpx, difference_gpx: gpxpy.gpx, difference_filename: str):
if not gpx or not difference_gpx:
return
start = time.time()
for track_idx in range(len(gpx.tracks)):
track = gpx.tracks[track_idx]
if track_idx >= len(difference_gpx.tracks):
print(f'Missing track # {track_idx} in {difference_filename}')
else:
difference_track = difference_gpx.tracks[track_idx]
for segment_idx in range(len(track.segments)):
if segment_idx >= len(difference_track.segments):
print(f'Missing track # {track_idx} segment # {segment_idx} in {difference_filename}')
else:
segment = track.segments[segment_idx]
                    # The difference gpx file may have different time-stamps from the input gpx file.
# So we will search and interpolate in the difference_gpx file.
# Convert difference_gpx to numpy arrays to make this efficient.
difference_segment = difference_track.segments[segment_idx]
difference_timestamps, difference_elevations = \
convert_segment_points_to_arrays(difference_segment)
for i in range(len(segment.points)):
if segment.points[i].time is None:
segment.points[i].elevation = None
elif (i < len(difference_segment.points)) and \
(segment.points[i].time.timestamp() == difference_timestamps[i]):
# The difference file timestamp matches the gpx timestamp. Subtract on a
# point by point basis.
# Note if there is a missing timestamp or elevation, they have been
# removed from the difference arrays. In that case we will apply
# the interpolation method. You could correct for this by converting
# the segment elevations to ndarrays as well, but they you have to figure
# out the index in the segment elevation to update -- not worth the trouble.
# In my testing this optimization reduced the time from 23 msec for the
# interpolation method to 8 ms for this straight lookup.
if (difference_segment.points[i].elevation is not None) and \
(segment.points[i].elevation is not None):
segment.points[i].elevation -= difference_segment.points[i].elevation
else:
segment.points[i].elevation = None
else:
# The difference file uses different timestamps than the gpx file.
# Interpolate in the difference file to get the elevation at the time matching
# the gpx file, and subtract it.
if segment.points[i].elevation is not None:
interpolated_difference = np.interp(segment.points[i].time.timestamp(),
difference_timestamps, difference_elevations,
left=np.nan, right=np.nan)
if np.isnan(interpolated_difference):
segment.points[i].elevation = None
else:
segment.points[i].elevation -= interpolated_difference
end = time.time()
if print_debugging:
print(f'subtract_difference elapsed time {end - start}')
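# Illustrative note (added comment, not part of the original script): np.interp is used above to
# look up the difference elevation at each gpx timestamp, e.g. np.interp(5.0, [0.0, 10.0],
# [100.0, 200.0]) returns 150.0; because left=np.nan and right=np.nan, any timestamp outside the
# difference file's time range yields NaN and the corresponding gpx elevation is set to None.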
def read_csv_sensor_file(csv_filename: str, is_interactive: bool) -> Optional[pandas.DataFrame]:
"""
Read csv file with pressure data.
Works with data files from tempo disc device.
Args:
csv_filename: The filename of the csv (tempo disc) file.
If None or empty string, not an error, returns None
is_interactive: True if running under gui.
Returns:
"""
# Read the Sensor file if one is specified
if not csv_filename:
return None
try:
sensor_df_heading = pandas.read_csv(csv_filename, nrows=2)
sensor_df = pandas.read_csv(csv_filename, skiprows=2, parse_dates=['date'])
except (IOError, ValueError, Exception) as exc:
print(f'Cannot read sensor file:\n {csv_filename}\nError: {str(exc)}')
return None
if sensor_df is None or sensor_df.empty or sensor_df_heading is None or sensor_df_heading.empty:
return None
# sensor_df_heading contains the first two lines of the file. This is a heading line plus one data line.
# We use it to determine if the file contains temperatures in Fahrenheit or Celsius
# If Fahrenheit, then all temps are converted to Celsius
if ('Temp_Units' in sensor_df_heading.columns) and (sensor_df_heading['Temp_Units'][0] == "Fahrenheit"):
sensor_df['temperature'] = (sensor_df['temperature'] - 32.0) * 5.0 / 9.0
# Parse the datetime field. Use exact=False to ignore the "(Mountain Standard Time)" text at the end.
# The format string (which is in strptime format) does not allow you to skip characters.
sensor_df['date'] = \
pandas.to_datetime(sensor_df['date'], format='%a %b %d %Y %H:%M:%S GMT%z', exact=False)
# Debugging code to show pressure plot.
if print_debugging:
# Show pressure plot (for debugging)
sensor_df.info(verbose=True)
print(sensor_df)
print(type(sensor_df['date'][0]))
# For debugging, look at the pressure chart
grid: sns.axisgrid.FacetGrid = sns.relplot(data=sensor_df, kind='line', x='date', y='pressure')
grid.set_xticklabels(rotation=30)
grid.set_xlabels('Time (MM-DD HH UTC)')
grid.set_ylabels('hPa')
plt.title('Pressure')
plt.tight_layout()
plt.show(block=not is_interactive)
return sensor_df
def pressure_to_elevation(pressure: float, p0: float) -> float:
"""
Calculate the elevation for a given pressure.
Args:
pressure: The pressure, in hPa (mbars)
p0: P0, the pressure at sea level.
Returns: Elevation in meters
"""
return (1 - ((pressure / p0) ** 0.190284)) * 145366.45 * 0.3048
def gpx_pressures_to_elevations(elevations: np.ndarray, pressures: np.ndarray, p0: float):
"""
Convert an array of pressures to elevations
Args:
elevations: Array of returned elevations.
pressures: Array of pressure values
p0: The calibration p0
Returns:
"""
for idx, pressure in enumerate(pressures):
if np.isnan(pressure):
elevations[idx] = np.nan
else:
elevations[idx] = round(pressure_to_elevation(pressure, p0), 2)
def calculate_p0(pressure: float, elevation: float) -> float:
"""
Calculate P0 from pressure and desired elevation
Args:
pressure: The pressure at this elevation
elevation: The desired elevation
Returns: The calibrated P0
"""
p0: float = pressure / ((1 - elevation / 0.3048 / 145366.45) ** (1 / 0.190284))
return p0
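# Illustrative note (added comment, not part of the original script): the two functions above are
# inverses for a fixed pressure: pressure_to_elevation(1013.25, 1013.25) is 0.0 m (sea level in the
# standard atmosphere), pressure_to_elevation(900.0, 1013.25) is roughly 988 m, and
# calculate_p0(900.0, pressure_to_elevation(900.0, 1013.25)) recovers 1013.25 up to rounding.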
def get_point_data(gpx_timestamps: np.ndarray, gpx_elevations: np.ndarray,
sensor_timestamps: np.ndarray, sensor_pressures: np.ndarray,
sensor_temperatures: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Create a list of pressures based on the pressure data.
Args:
gpx_timestamps: Array of datetime.timestamps (floats) from the gpx files
gpx_elevations: Array of gpx elevations.
sensor_timestamps: Array of datetime.timestamps (floats) from the barometric sensor
sensor_pressures: Array of pressures from the barometric sensor
sensor_temperatures: Array of temperatures from the sensor
The arrays must not have any None values, they are only float values.
The sensor_timestamps and sensor_pressures arrays must be the same size
Returns:
Array of gpx_timestamps: This will be the input parameter gpx_timestamps, except
with all points removed that could not have pressures
calculated. This happens when the gpx_timestamp is outside
the barometer timestamps.
Array of gpx_elevations: The gpx_elevations input array, except with all points
removed that could not have pressures calculated. This
happens when the gpx_timestamp is outside the barometer
timestamps.
Array of gpx_pressures: The pressure values associated with the gpx_timestamps.
"""
start = time.time()
# Slice the sensor timestamp, pressure, and temperature arrays to only include the data
# that applies to the times in segment_point_times. This makes the lookup run faster.
# It's not really significant now that I use ndarrays instead of lists and pandas
# DataFrames and series, but it's easy to do and does speed things up a little.
sensor_len = len(sensor_timestamps)
start_time = gpx_timestamps[0]
end_time = gpx_timestamps[len(gpx_timestamps) - 1]
start_search = np.searchsorted(sensor_timestamps, start_time, side='left') - 1
end_search = np.searchsorted(sensor_timestamps, end_time, side='right') + 1
if start_search < 0:
start_search = 0
if end_search > sensor_len:
end_search = sensor_len
sensor_timestamp_array = sensor_timestamps[start_search:end_search]
sensor_pressure_array = sensor_pressures[start_search:end_search]
sensor_temperature_array = sensor_temperatures[start_search:end_search]
# For each point in segment_point_times, find the two surrounding times in the
# timestamp array. Then use the corresponding values in the pressure array
# to interpolate the pressure at that time.
# Do the same with the temperature data
gpx_new_timestamps: np.ndarray = np.zeros_like(gpx_timestamps)
gpx_new_elevations: np.ndarray = np.zeros_like(gpx_elevations)
gpx_pressures: np.ndarray = np.zeros_like(gpx_timestamps)
gpx_temperatures: np.ndarray = np.zeros_like(gpx_timestamps)
new_idx: int = 0
for idx, point_time in enumerate(gpx_timestamps):
# Interpolate in the pressure_array.
pressure = np.interp(point_time, sensor_timestamp_array, sensor_pressure_array,
left=np.nan, right=np.nan)
if not np.isnan(pressure):
# Convert pressure to uncalibrated elevation
gpx_pressures[new_idx] = pressure
gpx_new_timestamps[new_idx] = gpx_timestamps[idx]
gpx_new_elevations[new_idx] = gpx_elevations[idx]
temperature = np.interp(point_time, sensor_timestamp_array, sensor_temperature_array,
left=np.nan, right=np.nan)
if np.isnan(temperature):
if new_idx > 0:
temperature = gpx_temperatures[new_idx - 1]
else:
temperature = 0.0
gpx_temperatures[new_idx] = temperature
new_idx += 1
end = time.time()
if print_debugging:
print(f'get_point_data elapsed time {end - start}')
# If any timestamps were outside the barometer timestamps, warn the user
if new_idx < len(gpx_timestamps):
print(
            f'{100.0 * (len(gpx_timestamps) - new_idx) / len(gpx_timestamps):.1f}% of points in the segment'
' were outside the barometer timestamps')
# Return a slice (view) of the timestamps, elevations, and pressures.
# This is usually the same size as the input arrays, but an be smaller if
# the gpx_timestamp is outside the sensor_timestamps
return gpx_new_timestamps[:new_idx], gpx_new_elevations[:new_idx], gpx_pressures[:new_idx],\
gpx_temperatures[:new_idx]
# Define constants the control calibration.
# If needed, these could be come functions and be settable by command line parameters
# Units are seconds (same which match timestamps).
# Skip initial data interval -- 5 minutes
# Some (many) gpx units start up with wildly wrong elevation data.
skip_initial_interval: float = 5.0 * 60.0
# Beginning average interval -- 5 minutes
# When using method A, skip over skip_initial_interval, then average over
# beginning_average_interval
beginning_average_interval: float = 5.0 * 60.0
# Section length - 1 hour
# Divide the gpx segment into sections of 1 about 1 hour
section_interval: float = 60.0 * 60.0
def add_constant_elevation(gpx_pressure_elevations: np.ndarray, gpx_pressures: np.ndarray, offset: float) -> None:
"""
Add a constant height adjustment to all elevations in pressure_elevations
Args:
gpx_pressure_elevations:
gpx_pressures
offset:
Returns:
"""
if len(gpx_pressure_elevations) < 1:
return
# Find the pressure and elevation at the midpoint. Adjust the elevation by the offset.
# Calculate a new P0 based on the pressure and desired elevation at that point.
mid_idx: int = len(gpx_pressure_elevations) // 2
p0 = calculate_p0(gpx_pressures[mid_idx], gpx_pressure_elevations[mid_idx] + offset)
# Recalculate the pressure_elevations using the new p0.
# ReCalculate new (calibrated) elevations from the pressure data.
gpx_pressures_to_elevations(gpx_pressure_elevations, gpx_pressures, p0)
def find_closest_timestamp_idx(gpx_timestamps: np.ndarray, search_timestamp: float) -> int:
"""
Finds the index of the time in gpx_timestamps that is closest to search_timestamp.
Args:
gpx_timestamps: Array of timestamps to search
search_timestamp: Time to search for
Returns:
Index of element in gpx_timestamps that is closest to the search_timestamp.
Will be between 0 and len(gpx_timestamps)-1
"""
idx: int = np.searchsorted(gpx_timestamps, search_timestamp)
if idx <= 0:
# search_timestamp is before start. return 0.
return idx
if idx >= len(gpx_timestamps):
# search_timestamp is beyond end. Return last point
return len(gpx_timestamps) - 1
# Search_timestamp is between idx-1 and idx. Find the closest
if abs(gpx_timestamps[idx - 1] - search_timestamp) < abs(gpx_timestamps[idx] - search_timestamp):
return idx - 1
else:
return idx
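# A small doctest-style sketch of the behaviour above (the timestamp values are made up
# for illustration only; they are not part of any real data):
#   >>> ts = np.array([0.0, 10.0, 20.0])
#   >>> find_closest_timestamp_idx(ts, 14.0)   # 14 is nearer to 10 than to 20
#   1
#   >>> find_closest_timestamp_idx(ts, 16.0)   # 16 is nearer to 20
#   2
#   >>> find_closest_timestamp_idx(ts, -5.0)   # before the start -> index 0
#   0
#   >>> find_closest_timestamp_idx(ts, 25.0)   # beyond the end -> last index
#   2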
def calibrate_elevations2(gpx_timestamps: np.ndarray,
gpx_pressures: np.ndarray, gpx_pressure_elevations: np.ndarray,
elevation_differences: np.ndarray,
) -> None:
"""
# Calibration Method C
# Break the file into sections.
# Calculate the average elevation_difference over a section_interval, which gives you the offset
# to apply at the midpoint of the section. Calibrate between endpoints by interpolating the offsets
# between the endpoints. On the edges, continue calibrating by extrapolating the first and last
# section calibration values.
Args:
gpx_timestamps:
gpx_pressures:
gpx_pressure_elevations:
elevation_differences:
Returns:
"""
segment_count: int = len(gpx_timestamps)
start_time: float = gpx_timestamps[0]
stop_time: float = gpx_timestamps[segment_count - 1]
segment_time: float = stop_time - start_time
# Description of the algorithm
#
# Calculate the number of sections (section_count) by dividing the segment time, less the
# skip_initial_interval, by the section_interval. Set a minimum of 2 sections.
#
# Then calculate the section midpoints and the section endpoints (in seconds).
# The first section starts at start_time+skip_initial_interval, and the last section ends
# at stop_time. The section endpoints and midpoints are times -- they do not necessarily
# correspond to a specific data point in the segment.
#
# Calculate the offset for each segment by averaging over the segment. At the midpoint, calculate
# the P0 that is required to produce that offset.
#
# Recompute all elevations based on a new P0. Between section mid-points use a P0
# calculated from the P0 at the two midpoints, interpolating (based on time not array indexes).
# At the beginning (and end) of the data, extrapolate the P0 based on the two section mid-points
# at the beginning (and end) of the section point list.
#
# For example, say the segment is 1:55 (h:mm) long, you have a skip_initial_interval of 5 minutes,
# and a section_interval of 60 minutes.
# Subtract the skip_initial_interval of 5 minutes and divide by the 60 minute section_interval, which rounds to
# 2 sections. The section endpoints are [0:05, 1:00, 1:55] and the midpoints are [0:32.5, 1:27.5].
#
# Here is a table showing the minimum and maximum length of the sections as a function of the
# segment length (less the skip_initial_interval). This table assumes the same 60 minute
# section_interval and 5 minute skip_initial_interval:
#               Just under            Just over
# Seg len    num secs  sec len  |  num secs  sec len
# 0:60           2      0:30    |      2      0:30
# 1:30           2      0:45    |      2      0:45
# 2:30           2      1:15    |      3      0:50
# 3:30           3      1:10    |      4      0:52.5
#
# As you continue to increase the segment length, the section length approaches 1:00.
# For segments shorter than 1:30, the calculated number of sections is 1, but with only
# 1 section we can't calculate a slope, so we force the number of sections to 2. That forces
# the section length down. We don't call this function for segments shorter than 60 minutes,
# so our minimum section time is 30 minutes.
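# A worked check of the 1:55 example above, in seconds (illustrative only; the numbers are
# assumptions chosen to match the prose, not real data):
#   >>> import numpy as np
#   >>> start, stop = 0.0, 115.0 * 60.0                # a 1:55 segment
#   >>> count = max(round((stop - start - skip_initial_interval) / section_interval), 2)   # -> 2
#   >>> np.linspace(start + skip_initial_interval, stop, num=count + 1, retstep=True)
#   (array([ 300., 3600., 6900.]), 3300.0)             # endpoints 0:05, 1:00, 1:55
#   # midpoint times: (300 + 3600) / 2 = 1950 s (0:32.5) and (3600 + 6900) / 2 = 5250 s (1:27.5)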
# Calculate the number of sections, and the section length.
section_count: int = round((segment_time - skip_initial_interval) / section_interval)
section_count = max(section_count, 2)
# Calculate the section end-points and mid-points
section_endpoint_times: np.ndarray
section_time: float
section_endpoint_times, section_time = np.linspace(start_time + skip_initial_interval, stop_time,
num=(section_count + 1), retstep=True)
section_endpoints: np.ndarray = np.zeros(section_count + 1, dtype=int)
section_midpoints: np.ndarray = np.zeros(section_count, dtype=int)
for i in range(section_count + 1):
idx: int = find_closest_timestamp_idx(gpx_timestamps, section_endpoint_times[i])
section_endpoints[i] = idx
if i > 0:
idx = find_closest_timestamp_idx(gpx_timestamps,
(section_endpoint_times[i] + section_endpoint_times[i - 1]) / 2.0)
section_midpoints[i - 1] = idx
# Calculate the average over each section. Then calculate the p0 required to adjust
# the midpoint by this value.
section_p0: np.ndarray = np.zeros(section_count)
for i in range(section_count):
offset: float = np.average(elevation_differences[section_endpoints[i]:section_endpoints[i + 1]])
# offset is the offset to apply at the midpoint.
p0: float = calculate_p0(gpx_pressures[section_midpoints[i]],
gpx_pressure_elevations[section_midpoints[i]] + offset)
section_p0[i] = p0
# Calibrate every elevation based on the section p0 values, interpolating between section points and
# extrapolating on each end.
# The same code is used for interpolation and extrapolation.
# idx is the index into the gpx_timestamps and related arrays.
# section_idx is the index into the section_midpoints of the current section.
# section_idx+1 is the index into the section_midpoints of the next section.
# Interpolation is happening between section_idx and section_idx+1.
# If we are working on a point to the left of section_midpoints[0] or to the right of section_midpoints[section_count-1],
# this still works, we are just extrapolating beyond the interpolation range but we still want to
# use the interpolation range for the slope of the line.
# We don't use numpy interp() function because it can't handle extrapolation.
section_idx = 0
for idx in range(segment_count):
# Switch to next section if we are there.
while (section_idx < section_count - 2) \
and (idx > section_midpoints[section_idx + 1]):
section_idx += 1
p0: float = (section_p0[section_idx + 1] - section_p0[section_idx]) \
* (gpx_timestamps[idx] - gpx_timestamps[section_endpoints[section_idx]]) \
/ (gpx_timestamps[section_midpoints[section_idx + 1]]
- gpx_timestamps[section_endpoints[section_idx]]) \
+ section_p0[section_idx]
gpx_pressure_elevations[idx] = round(pressure_to_elevation(gpx_pressures[idx], p0), 2)
return
def calibrate_elevations(gpx_timestamps: np.ndarray, gpx_elevations: np.ndarray,
gpx_pressures: np.ndarray, gpx_pressure_elevations: np.ndarray,
args: argparse.Namespace) -> None:
"""
Args:
gpx_timestamps: Array of times from the gpx files, converted to datetime.timestamp
gpx_elevations: Array of elevations from gpx file
gpx_pressures: Array of pressures
gpx_pressure_elevations: Array of elevations calculated from pressure data. Updated
to contain calibrated elevations.
args: Argument list
calibration_method
Returns:
The calibration method can be:
'0 None',
'A Average near beginning of file',
'B Average over entire file',
'C Linear fit in 1 hour chunks'
For 0, returns immediately.
For A, B, and C:
Calculate the difference between the gpx_elevations and the pressure_elevations.
1) If the time span of the gpx_times is 5 minutes or less, or there are fewer than 20 points,
takes the average of all the differences and applies that to the pressure_elevations.
2) Otherwise, if the time span of the gpx_times is 10 minutes or less, takes the average over
the final 5 minutes and applies that to the pressure_elevations.
For A:
Otherwise, skips the first skip_initial_interval, averages the differences over the following
beginning_average_interval, and applies that to the pressure_elevations.
For B (also used for C when the segment is shorter than the section_interval):
Otherwise, averages all the differences after the first skip_initial_interval and applies that.
For C:
Otherwise, splits the segment into roughly hour-long sections and calibrates p0 per section
(see calibrate_elevations2).
"""
# If calibration method is '0 None', do nothing.
if args.calibration_method[0].lower() == '0':
return
length = len(gpx_timestamps)
if len(gpx_elevations) != length \
or len(gpx_pressures) != length \
or len(gpx_pressure_elevations) != length \
or length < 2:
raise IndexError
# Calculate the elevation differences.
elevation_differences: np.ndarray = np.subtract(gpx_elevations, gpx_pressure_elevations)
start_time = gpx_timestamps[0]
stop_time = gpx_timestamps[length - 1]
# if the gpx file is 0 or 1 points long, do nothing
if length < 2:
return
# if the gpx file is skip_initial_interval or shorter, or less than 20 points, we can't skip
# the skip_initial_interval. Calibrate to the average of the entire elevation_differences
if (length < 20) or ((stop_time - start_time) <= skip_initial_interval):
offset = np.average(elevation_differences)
add_constant_elevation(gpx_pressure_elevations, gpx_pressures, offset)
# If the gpx file is shorter than 2 * skip_initial_interval, calibrate to the average of
# the last skip_initial_interval part of the file.
elif (stop_time - start_time) <= (2 * skip_initial_interval):
# side='right' means a[i-1] <= search_time < a[i]
start_idx = np.searchsorted(gpx_timestamps, stop_time - skip_initial_interval, side='right') - 1
start_idx = max(start_idx, 0)
start_idx = min(start_idx, length - 1)
offset = np.average(elevation_differences[start_idx:])
add_constant_elevation(gpx_pressure_elevations, gpx_pressures, offset)
# Method A
# Calibrate to the average of the elevation_differences, but skipping the skip_initial_interval
# and then averaging for only beginning_average_interval.
elif args.calibration_method[0].lower() == 'a':
start_idx = np.searchsorted(gpx_timestamps, start_time + skip_initial_interval, side='right') - 1
start_idx = max(start_idx, 0)
start_idx = min(start_idx, length - 1)
end_idx = np.searchsorted(gpx_timestamps, start_time + skip_initial_interval + beginning_average_interval,
side='right')
offset = np.average(elevation_differences[start_idx:end_idx])
add_constant_elevation(gpx_pressure_elevations, gpx_pressures, offset)
# Method B
# Calibrate to the average of the elevation_differences. But skip the skip_initial_interval.
# This method is used rather than later methods if the gpx file is shorter than the section
# time. In this case, the pressure will be essentially constant and attempting to fit a slope
# to the differences will introduce errors.
elif (args.calibration_method[0].lower() == 'b') \
or ((stop_time - start_time) <= (section_interval + skip_initial_interval)):
start_idx = np.searchsorted(gpx_timestamps, start_time + skip_initial_interval, side='right') - 1
start_idx = max(start_idx, 0)
start_idx = min(start_idx, length - 1)
offset = np.average(elevation_differences[start_idx:])
add_constant_elevation(gpx_pressure_elevations, gpx_pressures, offset)
# Method C
# Break the file into sections.
# For each section, calculate the average elevation_difference over the section and use it to
# compute the p0 needed at the section midpoint. Calibrate the points between midpoints by
# varying p0 linearly, and extrapolate at the beginning and end of the segment.
elif args.calibration_method[0].lower() == 'c':
# Pass this off to a separate function.
calibrate_elevations2(gpx_timestamps, gpx_pressures, gpx_pressure_elevations,
elevation_differences)
def convert_segment_points_to_arrays(segment: gpxpy.gpx.GPXTrackSegment) -> Tuple[np.ndarray, np.ndarray]:
"""
Convert the time and elevation data in a segment to numpy ndarrays of float values.
Removes all points where either the time or elevation is None. This means that the size of the
returned may be smaller than the size of segment.points, and that the values in the arrays may
not match up (index to index) with the segment points list.
Args:
segment: The segment from the gpx object.
Returns: 2 element tuple of numpy 1D ndarrays, represent the time and elevation.
The time is converted to a float timestamp.
Both arrays have dtype float and are the same size.
The size of the returned arrays may be smaller than the segment.points list, if there are any
None values in the data.
"""
time_list = []
elevation_list = []
for point in segment.points:
if point.time is not None and point.elevation is not None:
time_list.append(point.time.timestamp())
elevation_list.append(point.elevation)
return np.array(time_list), np.array(elevation_list)
def convert_dataframe_to_arrays(sensor_df: pandas.DataFrame) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
# Optimizations:
# In my first version, np.searchsorted turned out to be a bottleneck.
# This version took about 4 seconds to create a pressure list for a gpx file of 844 points
# with a Tempo Disc dataframe length of 3920 elements. That's shorter than usual -- the
# typical Tempo Disc dataframe size is 6000 points, so that would take longer.
# In Version 2 I sliced the date_list and pressure_list to be only those items that include
# the times in segment_point_times. This reduced the time to 0.3 secs, better than a factor
# of 10.
# In version 3 I converted the datetime objects to timestamps, and stored them in a numpy
# ndarray. I also stored the pressure list in a numpy ndarray. This lets np.searchsorted work
# on its native type without any conversions. This further reduces the time to 0.022 seconds
# (more than another factor of 10). If I remove the slicing optimization (version # 2) above,
# the time only increases slightly to 0.023 seconds, so using ndarrays is the most important
# optimization.
# These timings were performed under the debugger. If you run the application from PyCharm
# but not using the debugger, the times decrease further to about 0.012 secs for my test case.
#
# I now convert both the gpx file and the tempo disk dataframe to numpy ndarrays before
# substituting pressure-based elevations and calibration.
# Convert the date from a date to a timestamp.
# Convert date pressure and temperature lists to ndarrays.
# All ndarrays are the same length.
# Remove any entries where either time or pressure is None. There should not be any of these,
# but just in case.
# If the temperature value is none, set it to the previous temperature, or 0 if the first entry.
# Again this shouldn't happen, but check just in case.
sensor_time_series: pandas.Series = sensor_df['date']
sensor_pressure_series: pandas.Series = sensor_df['pressure']
sensor_temperature_series: pandas.Series = sensor_df['temperature']
sensor_time_list: List[float] = []
sensor_pressure_list: List[float] = []
sensor_temperature_list: List[float] = []
count = min(len(sensor_time_series), len(sensor_pressure_series))
for i in range(count):
if sensor_time_series[i] is not None and sensor_pressure_series[i] is not None:
sensor_time_list.append(sensor_time_series[i].timestamp())
sensor_pressure_list.append(sensor_pressure_series[i])
if sensor_temperature_series[i] is not None:
sensor_temperature_list.append(sensor_temperature_series[i])
elif i > 0:
sensor_temperature_list.append(sensor_temperature_series[i - 1])
else:
sensor_temperature_list.append(0.0)
return np.array(sensor_time_list), np.array(sensor_pressure_list), np.array(sensor_temperature_list)
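# For reference, the same conversion can usually be vectorized with pandas. This is only a
# sketch (it assumes the 'date' column holds datetime-like objects) and is not used below:
#   sensor_times = sensor_df['date'].map(lambda d: d.timestamp()).to_numpy(dtype=float)
#   sensor_pressures = sensor_df['pressure'].to_numpy(dtype=float)
#   sensor_temperatures = sensor_df['temperature'].ffill().fillna(0.0).to_numpy(dtype=float)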
gpxtpx_key: str = 'gpxtpx'
gpxtpx_extension: str = 'http://www.garmin.com/xmlschemas/TrackPointExtension/v1'
def add_replace_trackpoint_temperature(point: gpxpy.gpx.GPXTrackPoint, temperature: float):
"""
Look to see if there is an existing temperature in the track point.
If so replace it.
If not, add it.
Args:
point: Track point whose extensions are inspected and updated.
temperature: Temperature value; rounded to one decimal place before storing.
Returns:
None. The track point is modified in place.
"""
# Look to see if there is an existing temperature element
for ext in point.extensions:
for ext_child in ext:
if ext_child.tag[-5:] == 'atemp':
ext_child.text = str(round(temperature, 1))
return
# No existing temp found.
# point.extensions is a list of lxml.etree._Element objects.
ext_element = etree.Element('{' + gpxtpx_extension + '}TrackPointExtension')
sub_element = etree.SubElement(ext_element, '{' + gpxtpx_extension + '}atemp')
sub_element.text = str(round(temperature, 1))
point.extensions.append(ext_element)
return
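# When serialized, the extension added above looks roughly like this (illustrative; the
# actual prefix depends on the namespace map registered on the gpx object):
#   <gpxtpx:TrackPointExtension>
#     <gpxtpx:atemp>21.5</gpxtpx:atemp>
#   </gpxtpx:TrackPointExtension>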
def replace_elevations_from_pressure(gpx: gpxpy.gpx, sensor_df: pandas.DataFrame, args: argparse.Namespace) -> None:
"""
Replace the elevations in the gpx file with elevations from a pandas DataFrame.
The DataFrame comes from a Tempo Disc csv file that has pressure values.
Args:
gpx: Input and output gpx file
sensor_df: Input data frame with Tempo Disc data
args: ArgParse object
The gpx file is modified in place.
Returns:
None
"""
if not args.merge_pressure and not args.merge_temperature:
return
if not gpx or sensor_df is None or sensor_df.empty:
return
# For efficiency, convert the pandas DataFrame with the Tempo Disc data to numpy
# arrays of floats.
sensor_timestamps, sensor_pressures, sensor_temperatures = convert_dataframe_to_arrays(sensor_df)
# num_sensor_df_points = len(sensor_df['date'])
# date_series = sensor_df['date']
# pressure_series = sensor_df['pressure']
# default p0
# 1013.25 hPa is the standard pressure level used in flight at standard flight levels (so all
# aircraft use the same altimeter setting).
# It's just a starting point, we will calibrate it.
p0: float = 1013.25
# p0 = calculate_p0(893.6, 1143.694)
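# For orientation only: helpers like pressure_to_elevation()/calculate_p0() are commonly based
# on the standard barometric formula (this is an assumption about those helpers, which are
# defined elsewhere in this file, not a statement of their exact implementation):
#   elevation = 44330.0 * (1.0 - (pressure / p0) ** (1.0 / 5.255))
#   p0 = pressure / (1.0 - elevation / 44330.0) ** 5.255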
for track in gpx.tracks:
for segment in track.segments:
# For efficiency, convert gpx file to numpy arrays of floats -- timestamps and elevations.
# Remove any points which have None for either the time or the elevation.
gpx_timestamps, gpx_elevations = convert_segment_points_to_arrays(segment)
# Look up the pressure and temperature for each point.
# This may remove points if they are outside the barometer data.
# gpx_timestamps, gpx_elevations, gpx_pressures, and gpx_temperatures will all be the same size
# (which could be zero).
# gpx_timestamps and gpx_elevations could be smaller than before.
gpx_timestamps, gpx_elevations, gpx_pressures, gpx_temperatures = \
get_point_data(gpx_timestamps, gpx_elevations,
sensor_timestamps, sensor_pressures, sensor_temperatures)
if len(gpx_timestamps) < 2:
return
gpx_pressure_elevations: np.ndarray = np.zeros(0)
if args.merge_pressure:
# Calculate new (uncalibrated) elevations from the pressure data.
gpx_pressure_elevations: np.ndarray = np.zeros_like(gpx_pressures)
gpx_pressures_to_elevations(gpx_pressure_elevations, gpx_pressures, p0)
# Perform calibration if requested
calibrate_elevations(gpx_timestamps, gpx_elevations, gpx_pressures, gpx_pressure_elevations, args)
if args.merge_temperature:
# Make sure there is a TrackPointExtension enabled, so we can insert temperatures if we have any.
if (gpx_temperatures is not None) \
and (len(gpx_temperatures) > 0) \
and (gpxtpx_key not in gpx.nsmap):
gpx.nsmap[gpxtpx_key] = gpxtpx_extension
# Store the results back into gpx
if (args.merge_pressure or args.merge_temperature) \
and (len(gpx_timestamps) > 0):
pressure_idx = 0
for idx, point in enumerate(segment.points):
if point.time.timestamp() >= gpx_timestamps[pressure_idx]:
if args.merge_pressure \
and gpx_pressure_elevations is not None \
and pressure_idx < len(gpx_pressure_elevations):
point.elevation = gpx_pressure_elevations[pressure_idx]
if args.merge_temperature \
and gpx_temperatures is not None \
and pressure_idx < len(gpx_temperatures):
add_replace_trackpoint_temperature(point, gpx_temperatures[pressure_idx])
pressure_idx += 1
def time_str_to_datetime(adjust_sensor_time: str, display_error: bool) -> pandas.Timedelta:
try:
delta: pandas.Timedelta = | pandas.Timedelta(adjust_sensor_time) | pandas.Timedelta |
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import SparseArray
@pytest.mark.parametrize("fill_value", [0, np.nan])
@pytest.mark.parametrize("op", [operator.pos, operator.neg])
def test_unary_op(op, fill_value):
arr = np.array([0, 1, np.nan, 2])
sparray = SparseArray(arr, fill_value=fill_value)
result = op(sparray)
expected = SparseArray(op(arr), fill_value=op(fill_value))
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("fill_value", [True, False])
def test_invert(fill_value):
arr = np.array([True, False, False, True])
sparray = SparseArray(arr, fill_value=fill_value)
result = ~sparray
expected = SparseArray(~arr, fill_value=not fill_value)
tm.assert_sp_array_equal(result, expected)
result = ~ | pd.Series(sparray) | pandas.Series |
from pandas import DataFrame
from src.implementations.symbol_tables import config
class HashMap:
"""Implements https://en.wikipedia.org/wiki/Hash_table#Associative_arrays,
using https://en.wikipedia.org/wiki/Hash_table#Separate_chaining
for collision resolution
"""
def __init__(self, size: int):
# List of lists implements chaining
self.size = size
self.table = [[] for i in range(size)]
self.load = 0
def __str__(self):
return f'{self.table} | size: {self.size} | load: {self.load}'
def get(self, key):
"""Returns value of key in hash map if key in hash map, else None"""
row = self.table[self._modular_hash(key)]
if len(row) == 0:
print(f'\nKey {key} not in table')
return None
for tup in row:
if tup[0] == key:
return tup[1]
def put(self, key, value):
"""Puts key:value into hash map, resizing underlying table if needed"""
row = self.table[self._modular_hash(key)]
for tup in row:
if tup[0] == key:
tup[1] = value
return
row.append([key, value])
self.load += 1
if self.load / self.size >= config.chaining['LOAD_FACTOR_MAX']:
self._upsize()
return
def delete(self, key):
"""Deletes key from hash map. Returns True if successful, else False"""
row = self.table[self._modular_hash(key)]
if len(row) <= 0:
print(f'Key {key} not in table')
return False
for i, tup in enumerate(row):
if tup[0] == key:
del row[i]
self.load -= 1
if self.load / self.size <= config.chaining['LOAD_FACTOR_MIN']:
self._downsize()
print(f'Deleted key {key} from table')
return True
def _modular_hash(self, key) -> int:
# Hashing key and using modulo operator to wrap it into self.size
return hash(key) % self.size
def _downsize(self):
# Downsize underlying array (allocate less memory)
return self._resize(config.chaining['DOWNSIZE_FACTOR'])
def _upsize(self):
# Upsize underlying array (allocate more memory)
return self._resize(config.chaining['UPSIZE_FACTOR'])
def _resize(self, factor: float):
# Resize underlying array by factor:float
self.size = int(self.size * factor)
aux_table = [[] for i in range(self.size)]
for row in self.table:
for k, v in row:
aux_table[self._modular_hash(k)].append([k, v])
self.table = aux_table
return
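# Illustrative behaviour (the thresholds are assumptions -- the real values live in
# config.chaining): with LOAD_FACTOR_MAX = 0.75, UPSIZE_FACTOR = 2 and size = 4, the third
# put() pushes load/size to 0.75 and triggers _upsize(), rehashing every [key, value] pair
# into a table of size 8.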
if __name__ == '__main__':
initial_size = 5
HM = HashMap(initial_size)
test_items = [
[1, 1],
[2, 2],
[3, 3],
[4, 4],
[5, 5],
[12, 12]
]
print(f'INITIAL EMPTY HM:\n{DataFrame(HM.table)}\n*********************')
# .get() should work if HM is empty
for key, _ in test_items:
assert HM.get(key) is None
# .put() and, .get() should work if HM has items
for key, value in test_items:
assert HM.get(key) is None
HM.put(key, value)
assert HM.get(key) == value
print(f'HM AFTER PUTS:\n{ | DataFrame(HM.table) | pandas.DataFrame |
import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Bio import pairwise2
from scipy import interp
from scipy.stats import linregress
from sklearn.metrics import roc_curve, auc, precision_recall_curve
import thoipapy
import thoipapy.validation.bocurve
from thoipapy.utils import make_sure_path_exists
def collect_indiv_validation_data(s, df_set, logging, namedict, predictors, THOIPA_predictor_name, subsets):
"""
Parameters
----------
s
df_set
logging
namedict
predictors
THOIPA_predictor_name
Returns
-------
"""
logging.info("start collect_indiv_validation_data THOIPA_PREDDIMER_TMDOCK")
ROC_AUC_df = pd.DataFrame()
PR_AUC_df = pd.DataFrame()
mean_o_minus_r_by_sample_df = pd.DataFrame()
AUBOC_from_complete_data_ser = | pd.Series() | pandas.Series |
import pandas as pd
import os
import time
try:from ethnicolr import census_ln, pred_census_ln,pred_wiki_name,pred_fl_reg_name
except: os.system('pip install ethnicolr')
import seaborn as sns
import matplotlib.pylab as plt
import scipy
from itertools import permutations
import numpy as np
import matplotlib.gridspec as gridspec
from igraph import VertexClustering
from itertools import combinations
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.sans-serif'] = "Palatino"
plt.rcParams['font.serif'] = "Palatino"
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Palatino:italic'
plt.rcParams['mathtext.bf'] = 'Palatino:bold'
plt.rcParams['mathtext.cal'] = 'Palatino'
from matplotlib.ticker import FormatStrFormatter
from matplotlib import ticker
from sklearn.ensemble import RandomForestClassifier,RandomForestRegressor
from sklearn.neural_network import MLPClassifier,MLPRegressor
from sklearn.linear_model import RidgeClassifierCV
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import RidgeCV
from sklearn.decomposition import PCA
from statsmodels.stats.multitest import multipletests
import multiprocessing
from multiprocessing import Pool
import tqdm
import igraph
from scipy.stats import pearsonr
global paper_df
global main_df
global g
global graphs
global pal
global homedir
global method
global node_2_a
global a_2_node
global a_2_paper
global control
global matrix_idxs
global prs
# matrix_idxs = {'white_M':0,'white_W':1,'white_U':2,'api_M':3,'api_W':4,'api_U':5,'hispanic_M':6,'hispanic_W':7,'hispanic_U':8,'black_M':9,'black_W':10,'black_U':11}
pal = np.array([[72,61,139],[82,139,139],[180,205,205],[205,129,98]])/255.
# global us_only
# us_only = True
"""
AF = author names, with the format LastName, FirstName; LastName, FirstName; etc..
SO = journal
DT = document type (review or article)
CR = reference list
TC = total citations received (at time of downloading about a year ago)
PD = month of publication
PY = year of publication
DI = DOI
"""
import argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
return v
parser = argparse.ArgumentParser()
parser.add_argument('-homedir',action='store',dest='homedir',default='/Users/maxwell/Dropbox/Bertolero_Bassett_Projects/citations/')
parser.add_argument('-method',action='store',dest='method',default='wiki')
parser.add_argument('-continent',type=str2bool,action='store',dest='continent',default=False)
parser.add_argument('-continent_only',type=str2bool,action='store',dest='continent_only',default=False)
parser.add_argument('-control',type=str2bool,action='store',dest='control',default=False)
parser.add_argument('-within_poc',type=str2bool,action='store',dest='within_poc',default=False)
parser.add_argument('-walk_length',type=str,action='store',dest='walk_length',default='cited')
parser.add_argument('-walk_papers',type=str2bool,action='store',dest='walk_papers',default=False)
r = parser.parse_args()
locals().update(r.__dict__)
globals().update(r.__dict__)
wiki_2_race = {"Asian,GreaterEastAsian,EastAsian":'api', "Asian,GreaterEastAsian,Japanese":'api',
"Asian,IndianSubContinent":'api', "GreaterAfrican,Africans":'black', "GreaterAfrican,Muslim":'black',
"GreaterEuropean,British":'white', "GreaterEuropean,EastEuropean":'white',
"GreaterEuropean,Jewish":'white', "GreaterEuropean,WestEuropean,French":'white',
"GreaterEuropean,WestEuropean,Germanic":'white', "GreaterEuropean,WestEuropean,Hispanic":'hispanic',
"GreaterEuropean,WestEuropean,Italian":'white', "GreaterEuropean,WestEuropean,Nordic":'white'}
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
def log_p_value(p):
if p == 0.0:
p = "-log10($\it{p}$)>250"
elif p > 0.001:
p = np.around(p,3)
p = "$\it{p}$=%s"%(p)
else:
p = (-1) * np.log10(p)
p = "-log10($\it{p}$)=%s"%(np.around(p,0).astype(int))
return p
def convert_r_p(r,p):
return "$\it{r}$=%s\n%s"%(np.around(r,2),log_p_value(p))
def nan_pearsonr(x,y):
xmask = np.isnan(x)
ymask = np.isnan(y)
mask = (xmask==False) & (ymask==False)
return pearsonr(x[mask],y[mask])
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
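# Example (illustrative): mean_confidence_interval(np.arange(10)) returns roughly
# (4.5, 2.33, 6.67), i.e. the mean followed by the lower and upper bounds of the 95% CI.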
def make_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
# if os.path.exists('/%s/data/result_df_%s.csv'%(homedir,method)):
# df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
# return df
main_df = pd.read_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir),header=0)
result_df = pd.DataFrame(columns=['fa_race','la_race','citation_count'])
store_fa_race = []
store_la_race = []
store_citations = []
store_year = []
store_journal = []
store_fa_g = []
store_la_g = []
store_fa_category = []
store_la_category = []
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
store_year.append(entry[1]['PY'])
store_journal.append(entry[1]['SO'])
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
try:store_citations.append(len(entry[1].cited.split(',')))
except:store_citations.append(0)
##wiki
if method =='wiki':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = wiki_2_race[pred_wiki_name(fa_df,'lname','fname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = wiki_2_race[pred_wiki_name(la_df,'lname','fname').race.values[0]]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_fl_reg_name(fa_df,'lname','fname').race.values[0].split('_')[-1]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_fl_reg_name(la_df,'lname','fname').race.values[0].split('_')[-1]
#census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race,la_race= r.race.values
if method =='combined':
##wiki
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = wiki_2_race[pred_wiki_name(fa_df,'fname','lname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = wiki_2_race[pred_wiki_name(la_df,'fname','lname').race.values[0]]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census,la_race_census= r.race.values
if la_race_census != la_race_wiki:
if la_race_wiki == 'white':
la_race = la_race_census
if la_race_census == 'white':
la_race = la_race_wiki
elif (la_race_census != 'white') & (la_race_wiki != 'white'): la_race = la_race_wiki
elif la_race_census == la_race_wiki: la_race = la_race_wiki
if fa_race_census != fa_race_wiki:
if fa_race_wiki == 'white':
fa_race = fa_race_census
if fa_race_census == 'white':
fa_race = fa_race_wiki
elif (fa_race_census != 'white') & (fa_race_wiki != 'white'): fa_race = fa_race_wiki
elif fa_race_census == fa_race_wiki: fa_race = fa_race_wiki
store_la_race.append(la_race)
store_fa_race.append(fa_race)
store_fa_g.append(entry[1].AG[0])
store_la_g.append(entry[1].AG[1])
store_fa_category.append('%s_%s' %(fa_race,entry[1].AG[0]))
store_la_category.append('%s_%s' %(la_race,entry[1].AG[1]))
result_df['fa_race'] = store_fa_race
result_df['la_race'] = store_la_race
result_df['fa_g'] = store_fa_g
result_df['la_g'] = store_la_g
result_df['journal'] = store_journal
result_df['year'] = store_year
result_df['citation_count'] = store_citations
result_df['fa_category'] = store_fa_category
result_df['la_category'] = store_la_category
# result_df.citation_count = result_df.citation_count.values.astype(int)
result_df.to_csv('/%s/data/result_df_%s.csv'%(homedir,method),index=False)
return result_df
def make_pr_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.zeros((main_df.shape[0],8,8))
gender_base = {}
for year in np.unique(main_df.PY.values):
ydf = main_df[main_df.PY==year].AG
fa = np.array([x[0] for x in ydf.values])
la = np.array([x[1] for x in ydf.values])
fa_m = len(fa[fa=='M'])/ len(fa[fa!='U'])
fa_w = len(fa[fa=='W'])/ len(fa[fa!='U'])
la_m = len(la[la=='M'])/ len(la[la!='U'])
la_w = len(la[la=='W'])/ len(la[la!='U'])
gender_base[year] = [fa_m,fa_w,la_m,la_w]
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
if method =='wiki_black':
black = [3]
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
fa_g = entry[1].AG[0]
la_g = entry[1].AG[1]
paper_matrix = np.zeros((2,8))
# 1/0
##wiki
if method =='wiki' or method == 'wiki_black':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race = [np.sum(fa_race[white]),np.sum(fa_race[asian]),np.sum(fa_race[hispanic]),np.sum(fa_race[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race = [np.sum(la_race[white]),np.sum(la_race[asian]),np.sum(la_race[hispanic]),np.sum(la_race[black])]
# #census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(fa_df,'lname','fname').values[0][3:]
fa_race = [white,asian,hispanic,black]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(la_df,'lname','fname').values[0][3:]
la_race = [white,asian,hispanic,black]
if method == 'combined':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race_wiki = [np.sum(fa_race_wiki[white]),np.sum(fa_race_wiki[asian]),np.sum(fa_race_wiki[hispanic]),np.sum(fa_race_wiki[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race_wiki = [np.sum(la_race_wiki[white]),np.sum(la_race_wiki[asian]),np.sum(la_race_wiki[hispanic]),np.sum(la_race_wiki[black])]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race_census = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if fa_race_census[0] < fa_race_wiki[0]: fa_race = fa_race_census
else: fa_race = fa_race_wiki
if la_race_census[0] < la_race_wiki[0]: la_race = la_race_census
else: la_race = la_race_wiki
gender_b = gender_base[year]
if fa_g == 'M': paper_matrix[0] = np.outer([1,0],fa_race).flatten()
if fa_g == 'W': paper_matrix[0] = np.outer([0,1],fa_race).flatten()
if fa_g == 'U': paper_matrix[0] = np.outer([gender_b[0],gender_b[1]],fa_race).flatten()
if la_g == 'M': paper_matrix[1] = np.outer([1,0],la_race).flatten()
if la_g == 'W': paper_matrix[1] = np.outer([0,1],la_race).flatten()
if la_g == 'U': paper_matrix[1] = np.outer([gender_b[2],gender_b[3]],la_race).flatten()
paper_matrix = np.outer(paper_matrix[0],paper_matrix[1])
paper_matrix = paper_matrix / np.sum(paper_matrix)
prs[entry[0]] = paper_matrix
np.save('/%s/data/result_pr_df_%s.npy'%(homedir,method),prs)
def make_all_author_race():
"""
this makes the actual data by pulling the race from the census or wiki data,
but this version includes middle authors, which we use for the co-authorship networks
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append(a)
df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
df = df.drop_duplicates('name')
if method =='florida':
# 1/0
r = pred_fl_reg_name(df,'lname','fname')
r = r.rename(columns={'nh_black':'black','nh_white':'white'})
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
if method =='census':
r = pred_census_ln(df,'lname')
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
r = dict(zip(df.name.values,df.race.values))
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
a_lname,a_fname = a.split(', ')
races.append(r[a_lname.strip()])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
race2wiki = {'api': ["Asian,GreaterEastAsian,EastAsian","Asian,GreaterEastAsian,Japanese", "Asian,IndianSubContinent"],
'black':["GreaterAfrican,Africans", "GreaterAfrican,Muslim"],
'white':["GreaterEuropean,British", "GreaterEuropean,EastEuropean", "GreaterEuropean,Jewish", "GreaterEuropean,WestEuropean,French",
"GreaterEuropean,WestEuropean,Germanic", "GreaterEuropean,WestEuropean,Nordic", "GreaterEuropean,WestEuropean,Italian"],
'hispanic':["GreaterEuropean,WestEuropean,Hispanic"]}
if method =='wiki':
r = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r[race] = 0.0
for e in race2wiki[race]:
r[race] = r[race] + r[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r = r.drop(columns=[e])
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
races.append(r[r.name==a].race.values[0])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
if method =='combined':
r_wiki = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r_wiki[race] = 0.0
for e in race2wiki[race]:
r_wiki[race] = r_wiki[race] + r_wiki[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r_wiki = r_wiki.drop(columns=[e])
r_census = pred_census_ln(df,'lname')
census = r_census.white < r_wiki.white
wiki = r_census.white > r_wiki.white
r = r_census.copy()
r[census] = r_census
r[wiki] = r_wiki
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
def figure_1_pr_authors():
df = pd.read_csv('/%s/data/result_df_%s_all.csv'%(homedir,method))
paper_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
results = []
for year in np.unique(paper_df.PY.values):
print (year)
ydf = paper_df[paper_df.PY==year]
names = []
for p in ydf.iterrows():
for n in p[1].AF.split(';'):
names.append(n.strip())
names = np.unique(names)
result = np.zeros((len(names),4))
for idx,name in enumerate(names):
try:result[idx] = df[df.name==name].values[0][-4:]
except:result[idx] = np.nan
results.append(np.nansum(result,axis=0))
results = np.array(results)
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 14, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:7])
ax1_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(results.transpose()[[3,0,2,1]],axis=0), labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'black')
plt.margins(0,0)
plt.ylabel('sum of predicted author race')
plt.xlabel('publication year')
ax1.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
# 1/0
ax2 = fig.add_subplot(gs[:15,8:])
ax2_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(np.divide(results.transpose()[[3,0,2,1]],np.sum(results,axis=1)),axis=0)*100, labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0),alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'white')
plt.margins(0,0)
plt.ylabel('percentage of predicted author race',labelpad=-5)
plt.xlabel('publication year')
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('b',{'fontweight':'bold'},'left',pad=2)
plt.savefig('authors.pdf')
def figure_1_pr():
n_iters = 1000
df =pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0).rename({'PY':'year','SO':'journal'},axis='columns')
matrix = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
results = np.zeros((len(np.unique(df.year)),4))
if within_poc == False:
labels = ['white author & white author','white author & author of color','author of color & white author','author of color &\nauthor of color']
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W',]),
np.vectorize(matrix_idxs.get)(['api_M','api_W','hispanic_M','hispanic_W','black_M','black_W',])]
names = ['white-white','white-poc','poc-white','poc-poc']
plot_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_base_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
for i in range(len(groups)):
for j in range(len(groups)):
plot_matrix[:,i,j] = np.nansum(matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
if within_poc == True:
names = ['white author','Asian author','Hispanic author','Black author']
groups = [[0,4],[1,5],[2,6],[3,7]]
labels = names
plot_matrix = np.zeros((matrix.shape[0],len(groups)))
for i in range(4):
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,groups[i],:],axis=-1),axis=-1)
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,:,groups[i]],axis=-1),axis=-1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 16, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:5])
plt.sca(ax1)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels),colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=9,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'w')
plt.margins(0,0)
plt.ylabel('percentage of publications')
plt.xlabel('publication year')
ax1.tick_params(axis='x', which='major', pad=-1)
ax1.tick_params(axis='y', which='major', pad=0)
i,j,k,l = np.flip(results[0]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
# i,j,k,l = np.array([100]) - np.array([i,j,k,l])
plt.sca(ax1)
ax1.yaxis.set_major_formatter(ticker.PercentFormatter())
ax1.set_yticks([i,j,k,l])
ax1.set_yticklabels(np.flip(np.around(results[0]*100,0).astype(int)))
ax2 = ax1_plot[0].axes.twinx()
plt.sca(ax2)
i,j,k,l = np.flip(results[-1]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
plt.ylim(0,100)
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.set_yticks([i,j,k,l])
ax2.set_yticklabels(np.flip(np.around(results[-1]*100,0)).astype(int))
plt.xticks([1995., 2000., 2005., 2010., 2015., 2019],np.array([1995., 2000., 2005., 2010., 2015., 2019]).astype(int))
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
plot_df = pd.DataFrame(columns=['year','percentage','iteration'])
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([r,year,i]).reshape(1,-1),columns=['percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
a = plot_df[(plot_df.iteration==i)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = mean_confidence_interval(pct_df.percentage)
ci = np.around(ci,2)
print ("Across 1000 bootstraps, the mean percent increase per year was %s%% (95 CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]))
plt.text(.5,.48,"Increasing at %s%% per year\n(95%% CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]),{'fontsize':8,'color':'white'},horizontalalignment='center',verticalalignment='bottom',rotation=9,transform=ax2.transAxes)
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,6:10]))
jidx=jidx+3
for aidx,journal in enumerate(np.unique(df.journal)):
ax = axes[aidx]
plt.sca(ax)
if aidx == 2: ax.set_ylabel('percentage of publications')
if aidx == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
results = np.zeros(( len(np.unique(df[(df.journal==journal)].year)),4))
for yidx,year in enumerate(np.unique(df[(df.journal==journal)].year)):
papers = df[(df.year==year)&(df.journal==journal)].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
data = df[df.journal==journal]
if journal == 'NATURE NEUROSCIENCE':
for i in range(3): results = np.concatenate([[[0,0,0,0]],results],axis=0)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels,axis=0),colors=np.flip(pal,axis=0), alpha=1)
plt.margins(0,0)
ax.set_yticks([])
if aidx != 4:
ax.set_xticks([])
else: plt.xticks(np.array([1996.5,2017.5]),np.array([1995.,2019]).astype(int))
plt.title(journal.title(), pad=-10,color='w',fontsize=8)
if aidx == 0: plt.text(0,1,'b',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
journals = np.unique(df.journal)
plot_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
for j in journals:
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)&(df.journal==j)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([j,r,year,i]).reshape(1,-1),columns=['journal','percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
for j in journals:
a = plot_df[(plot_df.iteration==i)&(plot_df.journal==j)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['journal','year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.journal = j
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = pct_df.groupby(['journal']).percentage.agg(mean_confidence_interval).values
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,11:]))
jidx=jidx+3
for i,ax,journal,color in zip(range(5),axes,journals,sns.color_palette("rocket_r", 5)):
plt.sca(ax)
ax.clear()
#
# plot_df[np.isnan(plot_df.percentage)] = 0.0
if i == 0: plt.text(0,1,'c',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
lp = sns.lineplot(data=plot_df[plot_df.journal==journal],y='percentage',x='year',color=color,ci='sd')
plt.margins(0,0)
thisdf = plot_df[plot_df.journal==journal]
minp = int(np.around(thisdf.mean()['percentage'],0))
thisdf = thisdf[thisdf.year==thisdf.year.max()]
maxp = int(np.around(thisdf.mean()['percentage'],0))
plt.text(-0.01,.5,'%s'%(minp),horizontalalignment='right',verticalalignment='top', transform=ax.transAxes,fontsize=10)
plt.text(1.01,.9,'%s'%(maxp),horizontalalignment='left',verticalalignment='top', transform=ax.transAxes,fontsize=10)
ax.set_yticks([])
# ax.set_xticks([])
ax.set_ylabel('')
plt.margins(0,0)
ax.set_yticks([])
if i == 2:
ax.set_ylabel('percentage of publications',labelpad=12)
if i != 4: ax.set_xticks([])
else: plt.xticks(np.array([1.5,22.5]),np.array([1995.,2019]).astype(int))
mean_pc,min_pc,max_pc = np.around(ci[i],2)
if i == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
else: ax.set_xlabel('')
plt.text(.99,0,'95%' + "CI: %s<%s<%s"%(min_pc,mean_pc,max_pc),horizontalalignment='right',verticalalignment='bottom', transform=ax.transAxes,fontsize=8)
if journal == 'NATURE NEUROSCIENCE':
plt.xlim(-3,21)
plt.savefig('/%s/figures/figure1_pr_%s_%s.pdf'%(homedir,method,within_poc))
def validate():
black_names = pd.read_csv('%s/data/Black scientists - Faculty.csv'%(homedir))['Name'].values[1:]
fnames = []
lnames = []
all_names =[]
for n in black_names:
try:
fn,la = n.split(' ')[:2]
fnames.append(fn.strip())
lnames.append(la.strip())
all_names.append('%s_%s'%(fn.strip(),la.strip()))
except:continue
black_df = pd.DataFrame(np.array([all_names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append('%s_%s'%(a_fname,a_lname))
main_df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = main_df.drop_duplicates('name')
if method == 'wiki':
black_r = pred_wiki_name(black_df,'lname','fname')
all_r = pred_wiki_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.as_matrix()[:,4:][:,black].sum(axis=1)
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.as_matrix()[:,4:][:,black].sum(axis=1)
black_df['sample'] = 'Black-in-STEM'
if method == 'florida':
black_r = pred_fl_reg_name(black_df,'lname','fname')
all_r = pred_fl_reg_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-2]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-2]
black_df['sample'] = 'Black-in-STEM'
if method == 'census':
black_r = pred_census_ln(black_df,'lname')
all_r = pred_census_ln(main_df,'lname')
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-3]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-3]
black_df['sample'] = 'Black-in-STEM'
data = all_df.append(black_df,ignore_index=True)
data.probability = data.probability.astype(float)
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_%s.pdf'%(method))
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data[data['sample']=='papers'],x='probability',stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data[data['sample']=='Black-in-STEM'],x='probability',hue="sample",stat='density',common_norm=False,bins=20)
# plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_2.pdf')
def make_pr_control():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s.npy'%(homedir,method),ridge_probabilities)
def make_pr_control_jn():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
# 6) paper sub-field
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
df = df.rename(columns={'DI':'doi'})
df['category'] = 'none'
sub = pd.read_csv('/%s/article_data/JoNcategories_no2019.csv'%(homedir))
for cat,doi in zip(sub.category,sub.doi):
df.iloc[np.where(df.doi==doi)[0],-1] = cat
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location','category'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
cat = entry[1].category
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc,cat]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = | pd.to_numeric(reg_df["year"]) | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import os
import pandas as pd
import numpy as np
import string
# from operator import itemgetter
from collections import Counter, OrderedDict
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords
import nltk
#nltk.download('punkt')
#nltk.download('stopwords')
from gensim.models.phrases import Phrases, Phraser
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
import traceback
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# First, import the wine dataset.
# In[3]:
base_location = r"wine_data"
i = 0
for file in os.listdir(base_location):
file_location = base_location + '/' + str(file)
if i==0:
wine_dataframe = pd.read_csv(file_location, encoding='latin-1')
i+=1
else:
df_to_append = pd.read_csv(file_location, encoding='latin-1', low_memory=False)
wine_dataframe = pd.concat([wine_dataframe, df_to_append], axis=0)
# In[4]:
#wine_dataframe.drop_duplicates(subset=['Name'], inplace=True)
geographies = ['Subregion', 'Region', 'Province', 'Country']
for geo in geographies:
wine_dataframe[geo] = wine_dataframe[geo].apply(lambda x : str(x).strip())
# Then, the food dataset.
# In[5]:
food_review_dataset = pd.read_csv('food_data/Reviews.csv')
print(food_review_dataset.shape)
# ### 1. Training our Word Embeddings
#
# First, we need to train a Word2Vec model on all the words in our corpus. We will process our wine and food terms separately - some of the wine terms will be standardized to account for commonalities in the colorful language of the world of wine.
# In[6]:
wine_reviews_list = list(wine_dataframe['Description'])
food_reviews_list = list(food_review_dataset['Text'])
# To begin, we need to tokenize the terms in our corpus (wine and food).
# In[7]:
full_wine_reviews_list = [str(r) for r in wine_reviews_list]
full_wine_corpus = ' '.join(full_wine_reviews_list)
wine_sentences_tokenized = sent_tokenize(full_wine_corpus)
full_food_reviews_list = [str(r) for r in food_reviews_list]
full_food_corpus = ' '.join(full_food_reviews_list)
food_sentences_tokenized = sent_tokenize(full_food_corpus)
#print(wine_sentences_tokenized[:2])
#print(food_sentences_tokenized[:2])
# Next, the text in each sentence is normalized (tokenize, remove punctuation and remove stopwords).
# In[8]:
stop_words = set(stopwords.words('english'))
punctuation_table = str.maketrans({key: None for key in string.punctuation})
sno = SnowballStemmer('english')
def normalize_text(raw_text):
try:
word_list = word_tokenize(raw_text)
normalized_sentence = []
for w in word_list:
try:
w = str(w)
lower_case_word = str.lower(w)
stemmed_word = sno.stem(lower_case_word)
no_punctuation = stemmed_word.translate(punctuation_table)
if len(no_punctuation) > 1 and no_punctuation not in stop_words:
normalized_sentence.append(no_punctuation)
except:
continue
return normalized_sentence
except:
return ''
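# Rough illustration of the normalization above (exact stems depend on the NLTK data installed):
# normalize_text("Aromas of ripe cherries!")  ->  ['aroma', 'ripe', 'cherri']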
normalized_wine_sentences = []
for s in wine_sentences_tokenized:
normalized_text = normalize_text(s)
normalized_wine_sentences.append(normalized_text)
normalized_food_sentences = []
for s in food_sentences_tokenized:
normalized_text = normalize_text(s)
normalized_food_sentences.append(normalized_text)
#print(normalized_wine_sentences[:2])
#print(normalized_food_sentences[:2])
# Not all of the terms we are interested in are single words. Some of the terms are phrases, consisting of two (or more!) words. An example of this might be 'high tannin'. We can use gensim's Phrases feature to extract all the most relevant bi- and tri-grams from our corpus.
#
# We will train a separate trigram model for wine and for food.
# In[9]:
# first, take care of the wine trigrams
wine_bigram_model = Phrases(normalized_wine_sentences, min_count=100)
wine_bigrams = [wine_bigram_model[line] for line in normalized_wine_sentences]
wine_trigram_model = Phrases(wine_bigrams, min_count=50)
phrased_wine_sentences = [wine_trigram_model[line] for line in wine_bigrams]
wine_trigram_model.save('wine_trigrams.pkl')
### now, do the same for food
food_bigram_model = Phrases(normalized_food_sentences, min_count=100)
food_bigrams = [food_bigram_model[sent] for sent in normalized_food_sentences]
food_trigram_model = Phrases(food_bigrams, min_count=50)
phrased_food_sentences = [food_trigram_model[sent] for sent in food_bigrams]
food_trigram_model.save('food_trigrams.pkl')
wine_trigram_model = Phraser.load('wine_trigrams.pkl')
food_trigram_model = Phraser.load('food_trigrams.pkl')
descriptor_mapping = pd.read_csv('descriptor_mapping.csv', encoding='latin1').set_index('raw descriptor')
def return_mapped_descriptor(word, mapping):
if word in list(mapping.index):
normalized_word = mapping.at[word, 'level_3']
return normalized_word
else:
return word
normalized_wine_sentences = []
for sent in phrased_wine_sentences:
normalized_wine_sentence = []
for word in sent:
normalized_word = return_mapped_descriptor(word, descriptor_mapping)
normalized_wine_sentence.append(str(normalized_word))
normalized_wine_sentences.append(normalized_wine_sentence)
# If the trigram model has already been trained, simply retrieve it.
# In[10]:
#wine_trigram_model = Phraser.load('wine_trigrams.pkl')
#food_trigram_model = Phraser.load('food_trigrams.pkl')
# Now for the most important part: leveraging existing wine theory, the work of others like <NAME>, wine descriptor mappings and the UC Davis wine wheel, the top 5000 most frequent wine terms were reviewed to (i) determine whether they are a descriptor that can be derived by blind tasting, and (ii) whether they are informative (judgments like 'tasty' and 'great' are not considered to be informative). The roughly 1000 descriptors that remain were then mapped onto a normalized descriptor, a category and a class:
# In[11]:
#descriptor_mapping = pd.read_csv('descriptor_mapping.csv', encoding='latin1').set_index('raw descriptor')
#def return_mapped_descriptor(word, mapping):
# if word in list(mapping.index):
# normalized_word = mapping.at[word, 'level_3']
# return normalized_word
# else:
# return word
#normalized_wine_sentences = []
#for sent in phrased_wine_sentences:
# normalized_wine_sentence = []
# for word in sent:
# normalized_word = return_mapped_descriptor(word, descriptor_mapping)
# normalized_wine_sentence.append(str(normalized_word))
# normalized_wine_sentences.append(normalized_wine_sentence)
# We will go through the same process for food, but without normalizing the nonaroma descriptors.
# In[12]:
aroma_descriptor_mapping = descriptor_mapping.loc[descriptor_mapping['type'] == 'aroma']
#print(aroma_descriptor_mapping)
normalized_food_sentences = []
for sent in phrased_food_sentences:
normalized_food_sentence = []
for word in sent:
normalized_word = return_mapped_descriptor(word, aroma_descriptor_mapping)
normalized_food_sentence.append(str(normalized_word))
normalized_food_sentences.append(normalized_food_sentence)
# Now, let's combine the wine dataset with our food dataset so we can train our embeddings. We want to make sure that the food and wine embeddings are calculated in the same feature space so that we can compute similarity vectors later on.
# In[13]:
normalized_sentences = normalized_wine_sentences + normalized_food_sentences
# In[98]:
normalized_sentences
# We are ready to train our Word2Vec model!
# In[14]:
#Changed by Praveen - vector_size and epochs added)
wine_word2vec_model = Word2Vec(normalized_sentences, vector_size=300, min_count=8, epochs=15)
print(wine_word2vec_model)
wine_word2vec_model.save('food_word2vec_model.bin')
# In[ ]:
wine_word2vec_model
# In[15]:
# if the word2vec model has already been trained, simply load it
wine_word2vec_model = Word2Vec.load("food_word2vec_model.bin")
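# Optional sanity check of the shared embedding space (illustrative only; assumes the stemmed
# token survived the min_count filtering and is in the vocabulary):
# wine_word2vec_model.wv.most_similar('cherri', topn=5)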
# ### 2. Preprocessing our Wine Dataset
#
# We can now turn our attention to our wine dataset. Descriptions for a single wine are unlikely to contain sufficient information about all the nonaromas and aromas to yield consistent and reliable pairing recommendations. As such, we will produce recommendations at the grape variety & subregion level.
#
# First, let's normalize the names of the grape varieties in our dataset.
# In[16]:
variety_mapping = {'Shiraz': 'Syrah', 'Pinot Gris': 'Pinot Grigio', 'Pinot Grigio/Gris': 'Pinot Grigio',
'Garnacha, Grenache': 'Grenache', 'Garnacha': 'Grenache', 'Carmenère': 'Carmenere',
'Grüner Veltliner': 'Gruner Veltliner', 'Torrontés': 'Torrontes',
'Rhône-style Red Blend': 'Rhone-style Red Blend', 'Albariño': 'Albarino',
'Gewürztraminer': 'Gewurztraminer', 'Rhône-style White Blend': 'Rhone-style White Blend',
'Spätburgunder, Pinot Noir': 'Pinot Noir', 'Sauvignon, Sauvignon Blanc': 'Sauvignon Blanc',
'Pinot Nero, Pinot Noir': 'Pinot Noir', 'Malbec-Merlot, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
                   'Meritage, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Fumé Blanc': 'Sauvignon Blanc', 'Cabernet Sauvignon-Cabernet Franc, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Cabernet Merlot, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Cabernet Sauvignon-Merlot, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Cabernet Blend, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Malbec-Cabernet Sauvignon, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Merlot-<NAME>, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Merlot-<NAME>, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Cabernet Franc-Merlot, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Merlot-Malbec, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Cabernet, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Primitivo, Zinfandel': 'Zinfandel',
'Aragonês, Tempranillo': 'Aragonez, Tempranillo'
}
def consolidate_varieties(variety_name):
if variety_name in variety_mapping:
return variety_mapping[variety_name]
else:
return variety_name
wine_df_clean = wine_dataframe.copy()
wine_df_clean['Variety'] = wine_df_clean['Variety'].apply(consolidate_varieties)
# Next, we need to define the set of geography subregions we will use to define our wines. Not too general, not too specific... just right.
# In[17]:
order_of_geographies = ['Subregion', 'Region', 'Province', 'Country']
# replace any nan values in the geography columns with the word none
def replace_nan_for_zero(value):
if str(value) == '0' or str(value) == 'nan':
return 'none'
else:
return value
for o in order_of_geographies:
wine_df_clean[o] = wine_df_clean[o].apply(replace_nan_for_zero)
wine_df_clean[order_of_geographies] = wine_df_clean[order_of_geographies].fillna('none')
# In[18]:
variety_geo = wine_df_clean.groupby(['Variety', 'Country', 'Province', 'Region', 'Subregion']).size().reset_index().rename(columns={0:'count'})
variety_geo_sliced = variety_geo.loc[variety_geo['count'] > 1]
vgeos_df = pd.DataFrame(variety_geo_sliced, columns=['Variety', 'Country', 'Province', 'Region', 'Subregion', 'count'])
vgeos_df.to_csv('varieties_all_geos.csv')
# In[19]:
variety_geo_df = pd.read_csv('varieties_all_geos_normalized.csv', index_col=0)
wine_df_merged = pd.merge(left=wine_df_clean, right=variety_geo_df, left_on=['Variety', 'Country', 'Province', 'Region', 'Subregion'],
right_on=['Variety', 'Country', 'Province', 'Region', 'Subregion'])
#wine_df_merged.drop(['Unnamed: 0', 'Appellation', 'Bottle Size', 'Category', 'Country',
# 'Date Published', 'Designation', 'Importer', 'Province', 'Rating',
# 'Region', 'Reviewer', 'Reviewer Twitter Handle', 'Subregion', 'User Avg Rating', 'Winery', 'count'],
# axis=1, inplace=True)
wine_df_merged.shape
# We only want to keep wine types (location + variety) that appear frequently enough in our dataset.
# In[20]:
variety_geos = wine_df_merged.groupby(['Variety', 'geo_normalized']).size()
at_least_n_types = variety_geos[variety_geos > 30].reset_index()
wine_df_merged_filtered = | pd.merge(wine_df_merged, at_least_n_types, left_on=['Variety', 'geo_normalized'], right_on=['Variety', 'geo_normalized']) | pandas.merge |
import seaborn as sns
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
from pandas.io.json import json_normalize
from pysal.lib import weights
from sklearn import cluster
from shapely.geometry import Point
# # # # # PET DATA # # # # #
# filename = "pets.json"
# with open(filename, 'r') as f:
# objects = ijson.items
# austin dangerous dog api
urlD = 'https://data.austintexas.gov/resource/ykw4-j3aj.json'
# austin stray dog data
urlS = 'https://data.austintexas.gov/resource/hye6-gvq2.json'
# found_df / austin found pets pandas data frame constructor
pets_df = pd.read_json(urlS, orient='records')
location_df = | json_normalize(pets_df['location']) | pandas.io.json.json_normalize |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 15:42:36 2021
@author: nicolasnavarre
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
figures = 'figures/'
def production_by_group(POM_data):
POM_global = pd.DataFrame()
POM_global ["Org"] = POM_data.groupby(["Area"]).apply(lambda x: x["Org per Capita (with waste & feed)"].sum())
POM_global ["EAT"] = POM_data.groupby(["Area"]).apply(lambda x: x["EAT per Capita (with waste & feed)"].sum())
import numpy as np
for j in POM_data['GROUP'].unique().tolist():
food_list_eat = []
food_list_org = []
group_list = []
diet_df = pd.DataFrame()
width = 0.35
fig, ax = plt.subplots()
temp_count = 0
temp_counteat = 0
food_data = POM_data.groupby(["EAT_group"]).apply(lambda x: (x["POM"].sum()/(x["Population (2016), 1000person"].sum()*1000)))
if j == 'Other':
continue
for i in food_data.index:
df_temp = POM_data.loc[(POM_data['EAT_group'] == i) & (POM_data['GROUP'] == j)]
df_pop = df_temp[['Area', 'Population (2016), 1000person']]
df_pop = df_pop.drop_duplicates()
Org_avg = df_temp.groupby(["Area"]).apply(lambda x: (x["POM Org (with waste & feed)"].sum()))
EAT_avg = df_temp.groupby(["Area"]).apply(lambda x: (x["POM EAT (with waste & feed)"].sum()))
Org_avg = pd.DataFrame(Org_avg)
Org_avg = Org_avg.reset_index()
Org_avg = pd.merge(Org_avg, df_pop, on = 'Area', how = 'left')
Org_avg = Org_avg.rename(columns = {0 : 'food'})
Org_food = ((Org_avg['food'] * 1000 * 1000 * 1000) / 365 ).sum() / (Org_avg["Population (2016), 1000person"] * 1000).sum()
EAT_avg = pd.DataFrame(EAT_avg)
EAT_avg = EAT_avg.reset_index()
EAT_avg = pd.merge(EAT_avg, df_pop, on = 'Area', how = 'left')
EAT_avg = EAT_avg.rename(columns = {0 : 'food'})
EAT_food = ((EAT_avg['food'] * 1000 * 1000 * 1000) / 365 ).sum() / (EAT_avg["Population (2016), 1000person"] * 1000).sum()
temp_count += Org_food
temp_counteat += EAT_food
food_list_eat.append(EAT_food)
food_list_org.append(Org_food)
group_list.append(i)
x = np.arange(len(group_list))
diet_df['group'] = group_list
diet_df['gF EAT'] = food_list_eat
diet_df['gF Org'] = food_list_org
diet_df['dif'] = diet_df['gF Org'] - diet_df['gF EAT']
diet_df = diet_df.sort_values(by=['dif'], ascending=False)
ax.bar(x + width/2, diet_df['gF EAT'], width, label='EAT Diet', color = 'g')
ax.bar(x - width/2, diet_df['gF Org'], width, label='BAU Diet', color = 'r')
ax.set_ylabel('Prod/capita (g/person-day)')
ax.set_xticks(x)
ax.set_xticklabels(diet_df['group'])
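        # Shade the food groups where current (BAU) production exceeds the EAT-Lancet target
        # (dif > 0); they were sorted to the left of the chart above.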
pos_values = len(diet_df[diet_df["dif"]>0])
ax.axvspan(-0.5, pos_values-0.5, facecolor='0.2', alpha=0.25, zorder=-100)
plt.xticks(rotation = 90)
legend_elements = [Line2D([0], [0], lw = 0, marker='s', color='r', label='Current Diet\nTotal = '+str(int(temp_count))+' g/d',\
markerfacecolor='r'),
Line2D([0], [0], lw = 0, marker='s', color='g', label='EAT Lancet Diet\nTotal = '+str(int(temp_counteat))+' g/d',\
markerfacecolor='g')]
lg = ax.legend(handles=legend_elements)
fig.savefig(figures+j+" EAT_Group Production.png", bbox_extra_artists=(lg,), bbox_inches='tight', dpi = 400)
plt.close()
plt.close()
temp_count = 0
temp_counteat = 0
food_data = POM_data.groupby(["EAT_group"]).apply(lambda x: (x["POM"].sum()/(x["Population (2016), 1000person"].sum()*1000)))
x_labels = []
eat_bar = []
org_bar = []
group_list = []
food_list_eat = []
food_list_org = []
cal_list_eat = []
cal_list_org = []
diet_df = pd.DataFrame()
    extra_nations = ['Puerto Rico', 'Palestine', 'Greenland', 'Falkland Islands (Malvinas)',
                     'New Caledonia', 'China', 'China, Taiwan Province of']
POM_data['OrgCal perD']= ((POM_data['POM (no waste)']*10**9)/365 * POM_data['calories per g'])/(POM_data["Population (2016), 1000person"]*1000)
for i in food_data.index:
df_temp = POM_data.loc[(POM_data['EAT_group'] == i)]
df_pop = df_temp[['Area', 'Population (2016), 1000person']]
df_pop = df_pop.drop_duplicates()
Org_avg = df_temp.groupby(["Area"]).apply(lambda x: (x["POM Org (with waste & feed)"].sum()))
EAT_avg = df_temp.groupby(["Area"]).apply(lambda x: (x["POM EAT (with waste & feed)"].sum()))
Org_cal = df_temp.groupby(["Area"]).apply(lambda x: (x["OrgCal perD"].sum()))
EAT_cal = df_temp.groupby(["Area"]).apply(lambda x: (x["Cal Needed"].sum()))
Org_avg = pd.DataFrame(Org_avg)
Org_avg = Org_avg.reset_index()
Org_avg = pd.merge(Org_avg, df_pop, on = 'Area', how = 'left')
Org_avg = Org_avg.rename(columns = {0 : 'food'})
Org_avg = Org_avg[Org_avg['Population (2016), 1000person'] != 0]
Org_avg = Org_avg[~Org_avg['Area'].isin(extra_nations)]
Org_food = ((Org_avg['food'] * 1000 * 1000 * 1000) / 365 ).sum() / (Org_avg["Population (2016), 1000person"] * 1000).sum()
EAT_avg = pd.DataFrame(EAT_avg)
EAT_avg = EAT_avg.reset_index()
EAT_avg = pd.merge(EAT_avg, df_pop, on = 'Area', how = 'left')
EAT_avg = EAT_avg.rename(columns = {0 : 'food'})
EAT_avg = EAT_avg[EAT_avg['Population (2016), 1000person'] != 0]
EAT_avg = EAT_avg[~EAT_avg['Area'].isin(extra_nations)]
EAT_food = ((EAT_avg['food'] * 1000 * 1000 * 1000) / 365 ).sum() / (EAT_avg["Population (2016), 1000person"] * 1000).sum()
Org_cal = pd.DataFrame(Org_cal)
Org_cal = Org_cal.reset_index()
Org_cal = pd.merge(Org_cal, df_pop, on = 'Area', how = 'left')
Org_cal = Org_cal.rename(columns = {0 : 'cal'})
Org_cal = Org_cal[Org_cal['Population (2016), 1000person'] != 0]
Org_cal = Org_cal[~Org_cal['Area'].isin(extra_nations)]
Org_cal_food = Org_cal['cal'].sum()/ len(Org_cal['cal'])
EAT_cal = | pd.DataFrame(EAT_cal) | pandas.DataFrame |
import numpy as np
import pandas as pd
import tensorflow as tf
import os
import keras
from keras.models import Sequential, Model
from keras.layers import GlobalAveragePooling2D, Dense, Input, Dropout, concatenate
from keras.applications.vgg16 import VGG16
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
from data_generator import TextMultimodalDataGenerator
from math import ceil
import datetime
# If using Nvidia gpu and running into memory issues
#gpus = tf.config.experimental.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(gpus[0], True)
#tf.TF_ENABLE_GPU_GARBAGE_COLLECTION=False
BATCH_SIZE = 32
IMAGE_SIZE = 224
DROPOUT_PROB = 0.2
DATASET_PATH = "data/"
LOG_PATH = "log/"
TABULAR_COLS = ['gender', 'masterCategory', 'subCategory', 'articleType', 'baseColour', 'usage']
log_name = LOG_PATH + str(datetime.datetime.today().strftime("%Y%m%d%H%M%S")) + ".txt"
# Read CSV file
#df = pd.read_csv(DATASET_PATH + "prepared_data.csv", error_bad_lines=False)
df = pd.read_csv(DATASET_PATH + "balanced_sorted.csv", nrows=100, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + ".jpg", axis=1)
df['usage'] = df['usage'].astype('str')
images = df['image']
tabular = pd.get_dummies(df[TABULAR_COLS])
labels = pd.get_dummies(df['season'])
NUM_CLASSES = len(labels.columns)
dummy_tabular_cols = tabular.columns
dummy_labels_cols = labels.columns
# Text pre-processing
vocab_filename = 'data/vocab.txt'
file = open(vocab_filename, 'r')
vocab = file.read()
file.close()
vocab = vocab.split()
vocab = set(vocab)
sentences = df['productDisplayName'].astype('str').values.tolist()
usage = pd.get_dummies(df['season'])
usage = usage.values.tolist()
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)
text = pd.DataFrame(tokenizer.texts_to_matrix(sentences, mode='tfidf'))
dummy_text_cols = text.columns
data = pd.concat([ images, tabular, text, labels ], axis=1)
train, test = train_test_split(
data,
random_state=42,
shuffle=True,
stratify=labels
)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
train_images = train['image']
train_tabular = train[dummy_tabular_cols]
train_text = train[dummy_text_cols]
train_labels = train[dummy_labels_cols]
training_generator = TextMultimodalDataGenerator(
train_images,
train_tabular,
train_text,
train_labels,
batch_size=BATCH_SIZE,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
directory=DATASET_PATH + "images/"
)
test_images = test['image']
test_tabular = test[dummy_tabular_cols]
test_text = test[dummy_text_cols]
test_labels = test[dummy_labels_cols]
test_generator = TextMultimodalDataGenerator(
test_images,
test_tabular,
test_text,
test_labels,
batch_size=BATCH_SIZE,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
directory=DATASET_PATH + "images/"
)
# Build image model
base_model1 = VGG16(weights='imagenet', include_top=False, input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
for layer in base_model1.layers:
layer.trainable = False
x1 = base_model1.output
x1 = GlobalAveragePooling2D()(x1)
x1 = Dropout(DROPOUT_PROB)(x1)
# Add simple input layer for tabular data
input_tab = Input(batch_shape=(None, len(train_tabular.columns)))
# CHAIN FUSION WITH TABULAR
x2 = concatenate([x1, input_tab])
x2 = Dense(x2.shape[1], activation='relu')(x2)
x2 = Dropout(DROPOUT_PROB)(x2)
# Build text model
n_words = text.shape[1]
input_text = Input(batch_shape=(None, len(train_text.columns)))
x = concatenate([x2, input_text])
# The same as in the tabular data
x = Sequential()(x)
x = Dense(x.shape[1], input_shape=(n_words,), activation='relu')(x) #12
x = Dropout(DROPOUT_PROB)(x)
x = Dense(ceil(x.shape[1]/2), activation='relu')(x) #8
x = Dropout(DROPOUT_PROB)(x)
predictions = Dense(NUM_CLASSES, activation='softmax')(x)
model = Model(inputs=[base_model1.input, input_tab, input_text], outputs=predictions)  # three inputs: image, tabular dummies, tf-idf text
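# Note: the input order [image, tabular, text] is assumed to match the batches
# produced by TextMultimodalDataGenerator above.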
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
print(model.summary())
log_file = open(log_name, 'w')
log_file.write('VGG->Tabular Chain Fusion \n')
summary = model.summary(print_fn=lambda x: log_file.write(x + '\n'))
log_file.close()
print(summary)
callbacks = [
keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor='val_loss',
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=2,
verbose=1)
]
history = model.fit_generator(
generator=training_generator,
steps_per_epoch=ceil(0.75 * (df.size / BATCH_SIZE)),
validation_data=test_generator,
validation_steps=ceil(0.25 * (df.size / BATCH_SIZE)),
epochs=10,
callbacks=callbacks,
verbose=1
)
hist_df = | pd.DataFrame(history.history) | pandas.DataFrame |
"""
Read splice junction output files from STAR aligner (SJ.out.tab)
"""
import os
import joblib
import numpy as np
import pandas as pd
from ..common import JUNCTION_ID, JUNCTION_START, JUNCTION_STOP, READS, \
JUNCTION_MOTIF, EXON_START, EXON_STOP, CHROM, STRAND, ANNOTATED, \
SAMPLE_ID, UNIQUE_READS, MULTIMAP_READS, MAX_OVERHANG
from .core import add_exons_and_junction_ids
COLUMN_NAMES = (CHROM, JUNCTION_START, JUNCTION_STOP, STRAND,
JUNCTION_MOTIF, ANNOTATED, UNIQUE_READS, MULTIMAP_READS,
MAX_OVERHANG)
def int_to_junction_motif(n):
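    # Maps STAR's intron-motif codes (SJ.out.tab column 5) to motif strings; the even codes
    # are the same motifs reported on the negative strand.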
if n == 0:
return 'non-canonical'
if n == 1:
return 'GT/AG'
if n == 2:
# Negative strand: CT/AC
return 'GT/AG'
if n == 3:
return 'GC/AG'
if n == 4:
# Negative strand: CT/GC
return 'GC/AG'
if n == 5:
return 'AT/AC'
if n == 6:
# Negative strand: GT/AT
return 'AT/AC'
def read_sj_out_tab(filename):
"""Read an SJ.out.tab file as produced by the RNA-STAR aligner into a
pandas Dataframe
Parameters
----------
filename : str of filename or file handle
Filename of the SJ.out.tab file you want to read in
Returns
-------
sj : pandas.DataFrame
Dataframe of splice junctions with the columns,
('chrom', 'junction_start', 'junction_stop', 'strand',
'junction_motif', 'exon_start', 'exon_stop', 'annotated',
'unique_junction_reads', 'multimap_junction_reads', 'max_overhang')
"""
    sj = pd.read_table(filename, header=None, names=COLUMN_NAMES, sep=r'\s+')
sj[JUNCTION_MOTIF] = sj[JUNCTION_MOTIF].map(int_to_junction_motif)
# Convert unknown strands to explicitly say "undefined"
sj[STRAND] = sj[STRAND].replace(0, 'undefined')
# Convert integer strand to symbol
# Use index-based replacement because it's 100x faster than map
rows = sj.strand == 1
sj.loc[rows, STRAND] = '+'
rows = sj.strand == 2
sj.loc[rows, STRAND] = '-'
# Translate negative strand intron motifs
# rows = sj.strand == '-'
# sj.loc[rows, 'intron_motif'] = sj.intron_motif[rows].map(
# lambda x: NEG_STRAND_INTRON_MOTIF[x])
sj.annotated = sj.annotated.astype(bool)
sj = add_exons_and_junction_ids(sj)
return sj
def _read_single_filename(filename, sample_id_func, ignore_multimapping=False):
splice_junction = read_sj_out_tab(filename)
sample_id = sample_id_func(filename)
sample_id = sample_id.split('SJ.out.tab')[0].rstrip('.')
splice_junction[SAMPLE_ID] = sample_id
if not ignore_multimapping:
splice_junction[READS] = splice_junction[UNIQUE_READS] \
+ splice_junction[MULTIMAP_READS]
else:
splice_junction[READS] = splice_junction[UNIQUE_READS]
return splice_junction
def read_multiple_sj_out_tab(filenames, ignore_multimapping=False,
sample_id_func=os.path.basename, n_jobs=-1):
"""Read the splice junction files and return a tall, tidy dataframe
Adds a column called "sample_id" based on the basename of the file, minus
"SJ.out.tab"
Parameters
----------
filenames : iterator
A list or other iterator of filenames to read
multimapping : bool
If True, include the multimapped reads in total read count
sample_id_func : function
A function to extract the sample id from the filenames
Returns
-------
metadata : pandas.DataFrame
A tidy dataframe, where each row has the observed reads for a sample
"""
dfs = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(_read_single_filename)(
filename, sample_id_func, ignore_multimapping)
for filename in filenames)
splice_junctions = | pd.concat(dfs, ignore_index=True) | pandas.concat |
## GitHub: dark-teal-coder
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
from fpdf import FPDF
import datetime
import string
import os
## Get datetime information
current_datetime = datetime.datetime.now()
current_year = current_datetime.year
## Get the running script path
script_path = os.path.dirname(os.path.abspath(__file__))
## Get the current working directiory
cwd = os.path.abspath(os.getcwd())
# print(script_path, cwd)
def read_noc(noc_filepath):
"""This function reads a data file containing a table of National Occupational Classification (NOC) codes related to computer
science and information technology jobs and returns the data in DataFrame format."""
try:
## Use Pandas to read in csv file
## Python parsing engine for RegEx delimiters
df_noc = pd.read_csv(noc_filepath, sep=', ', header=0, engine='python')
except FileNotFoundError:
print(f"The following file cannot be found:", noc_filepath, sep='\n')
except:
print("An unknown error occurs while reading in the following file causing the program to exit prematurely:", noc_filepath,
sep='\n')
else:
## Unify the headers
df_noc.columns = df_noc.columns.str.lower()
## Trim leading and ending spaces in the headers
## Ref.: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rename.html
## (inplace=True) means not to return a new DataFrame
df_noc.rename(columns=lambda x: x.strip(), inplace=True)
# print(df_noc)
return df_noc
def get_page(url_code, c):
"""This function scrapes wage data of 10 tech occupations classified by NOC from Job Bank and returns the data list."""
url_base = "https://www.jobbank.gc.ca/wagereport/occupation/"
## Add URL code to the end of the base URL to go to different wage report pages on Job Bank
url = url_base + str(url_code[c])
html_response = requests.get(url)
## The .content attribute holds raw bytes, which can be decoded better than the .text attribute.
html_doc = BeautifulSoup(html_response.content, 'html.parser')
# print(html_doc)
data_list = []
# wage_table = html_doc.find(id="wage-occ-report")
# print(wage_table)
nation_wages = html_doc.find("tr", class_="areaGroup national")
data_list.append(nation_wages.text.strip().split())
province_wages = html_doc.find_all("tr", class_="areaGroup province prov")
for prov_wage in province_wages:
data_list.append(prov_wage.text.strip().rsplit(maxsplit=3))
# print([row for row in data_list])
return data_list
def write_excel(filepath_in, df_noc, url_code):
writer = pd.ExcelWriter(filepath_in, engine='xlsxwriter')
headers_nation = ['NOC', 'Occupation', 'URL Code', 'Low', 'Mid', 'High']
headers_province = ['Province', 'Low', 'Mid', 'High']
## Each iteration will scrape a webpage and change the data for 1 NOC into a DataFrame
df_tech_wages_ca = pd.DataFrame()
df_tech_wages_prov = | pd.DataFrame() | pandas.DataFrame |
import copy
import json
import traceback
from pathlib import Path
from logging import getLogger
from multiprocessing import Pool
import numpy as np
import pandas as pd
from tqdm import tqdm
from submit_main import feature_extraction
from log import init_logger
def preprocess(path):
logger = getLogger('root')
try:
with open(path, 'r') as f:
steps = json.load(f)['steps']
rewards = np.array([step[0]['reward'] for step in steps])
rewards = np.diff(np.append(0, rewards))
history = [[] for _ in range(100)]
list_history_df = []
for r in range(0, 2000):
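            # From round 1 onward, log the previous outcomes: our reward for the arm we
            # pulled, and -1 as a marker on the arm the opponent pulled.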
if r >= 1:
history[steps[r][0]['action']].append(rewards[r])
history[steps[r][1]['action']].append(-1)
new_df = pd.DataFrame()
new_df['history'] = copy.deepcopy(history)
new_df['round'] = r
new_df['game_id'] = path.stem
new_df['threshold'] = steps[r][0]['observation']['thresholds']
last_opponent_chosen = np.zeros(100)
if r >= 1:
last_opponent_chosen[steps[r][1]['action']] = 1
new_df['last_opponent_chosen'] = last_opponent_chosen
second_last_opponent_chosen = np.zeros(100)
if r >= 2:
second_last_opponent_chosen[steps[r-1][1]['action']] = 1
new_df['second_last_opponent_chosen'] = second_last_opponent_chosen
third_last_opponent_chosen = np.zeros(100)
if r >= 3:
third_last_opponent_chosen[steps[r-2][1]['action']] = 1
new_df['third_last_opponent_chosen'] = third_last_opponent_chosen
new_df['opponent_repeat_twice'] = last_opponent_chosen * second_last_opponent_chosen
new_df['opponent_repeat_three_times'] = new_df['opponent_repeat_twice'] * third_last_opponent_chosen
list_history_df.append(new_df)
history_df = | pd.concat(list_history_df, axis=0) | pandas.concat |
# MIT License
#
# Copyright (c) 2021, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from math import exp, sqrt
import numpy as np
import pandas as pd
from scipy import optimize
import findiff
from qa_tools.utils import *
def _calc_distance(r1, r2):
"""Calculates the Euclidean distance between two points.
Parameters
----------
r1 : :obj:`numpy.ndarray`
Cartesian coordinates of a point with shape ``(3,)``.
r2 : :obj:`numpy.ndarray`
Cartesian coordinates of a point with shape ``(3,)``.
"""
return np.linalg.norm(r1 - r2)
def _json_parse_qc(system_label, json_calc, only_converged=False):
"""Converts a system's PySCF calculation into quantum chemistry dataframe
rows for all lambda values.
Parameters
----------
system_label : :obj:`str`
The string that defines the atoms and or molecules.
json_calc : :obj:`dict`
All data from a single calculation involving a specific system, charge,
and multiplicity.
only_converged : :obj:`bool`, optional
Only include data where calculations converged. Defaults to ``False``.
Returns
-------
:obj:`list`
A list of dictionaries for new pandas dataframe rows.
"""
df_rows = []
# If possible, check all calculations have converged
if only_converged and 'cc_converged' in json_calc.keys():
if not np.all(np.array(json_calc['cc_converged'])): return []
# Getting row-dependent data.
qa_lambdas = json_calc['qa_lambdas']
electronic_energies = json_calc['electronic_energies']
scf_converged = json_calc['scf_converged']
# Keys that are not present in every calculation.
if 'cc_converged' in json_calc.keys():
cc_converged = json_calc['cc_converged']
else:
cc_converged = [None for _ in scf_converged]
if 'hf_energies' in json_calc.keys():
hf_energies = json_calc['hf_energies']
else:
hf_energies = [None for _ in electronic_energies]
if 'triples_corrections' in json_calc.keys():
triples_corrections = json_calc['triples_corrections']
else:
triples_corrections = [None for _ in electronic_energies]
if 'scf_spin_squared' in json_calc.keys():
scf_spin_squared = json_calc['scf_spin_squared']
else:
scf_spin_squared = [None for _ in electronic_energies]
if 'cc_spin_squared' in json_calc.keys():
cc_spin_squared = json_calc['cc_spin_squared']
else:
cc_spin_squared = [None for _ in electronic_energies]
if 'broken_symmetry' in json_calc.keys():
broken_sym = json_calc['broken_symmetry']
else:
broken_sym = None
# Adds df row for every lambda.
for i in range(len(qa_lambdas)):
df_dict = {'system': system_label}
# Checks convergence stuff.
if scf_converged[i] and (cc_converged[i] is None or cc_converged[i]):
converged = True
else:
converged = False
if only_converged and not converged:
continue
# Adds common information for df rows
df_dict['atomic_numbers'] = np.array(json_calc['atomic_numbers'])
df_dict['charge'] = json_calc['molecular_charge']
df_dict['multiplicity'] = json_calc['molecular_multiplicity']
df_dict['n_electrons'] = json_calc['n_electrons']
df_dict['qc_method'] = json_calc['model']['method']
df_dict['basis_set'] = json_calc['model']['basis']
df_dict['converged'] = converged
# Handles energy components for post-HF and DFT methods.
if hf_energies[i] is not None:
df_dict['hf_energy'] = hf_energies[i]
try:
df_dict['correlation_energy'] = electronic_energies[i] - hf_energies[i]
except TypeError:
df_dict['correlation_energy'] = np.nan
else:
df_dict['hf_energy'] = electronic_energies[i]
df_dict['correlation_energy'] = None
df_dict['cc_spin_squared'] = cc_spin_squared[i]
df_dict['scf_spin_squared'] = scf_spin_squared[i]
df_dict['triples_correction'] = triples_corrections[i]
df_dict['broken_sym'] = broken_sym
# Important ones go in front and back.
df_dict['lambda_value'] = float(qa_lambdas[i])
df_dict['electronic_energy'] = electronic_energies[i]
if len(df_dict['atomic_numbers']) == 2:
geo = np.array(json_calc['molecule']['geometry'])
df_dict['bond_length'] = _calc_distance(geo[0], geo[1])
df_rows.append(df_dict)
return df_rows
def _json_parse_qats(system_label, json_calc):
"""Converts a system's PySCF calculation into quantum chemistry with
Taylor series dataframe row.
Parameters
----------
system_label : :obj:`str`
The string that defines the atoms and or molecules.
json_calc : :obj:`dict`
All data from a single calculation in a single dictionary.
Returns
-------
:obj:`list`
A list of dictionaries for new pandas dataframe rows.
"""
df_dict = {'system': system_label}
df_dict['atomic_numbers'] = np.array(json_calc['atomic_numbers'])
df_dict['charge'] = json_calc['molecular_charge']
df_dict['multiplicity'] = json_calc['molecular_multiplicity']
df_dict['n_electrons'] = json_calc['n_electrons']
df_dict['qc_method'] = json_calc['model']['method']
df_dict['basis_set'] = json_calc['model']['basis']
qa_lambdas = json_calc['qa_lambdas']
df_dict['lambda_range'] = (
int(min(qa_lambdas)), int(max(qa_lambdas))
)
df_dict['finite_diff_delta'] = json_calc['finite_diff_delta']
df_dict['finite_diff_acc'] = json_calc['finite_diff_acc']
df_dict['poly_coeffs'] = np.array(json_calc['qats_poly_coeffs'])
if len(df_dict['atomic_numbers']) == 2:
geo = np.array(json_calc['molecule']['geometry'])
df_dict['bond_length'] = _calc_distance(geo[0], geo[1])
return [df_dict]
def qc_dframe(json_dict, only_converged=False):
"""Prepares a Pandas dataframe of quantum chemistry data from a JSON file.
Parameters
----------
json_dict : :obj:`dict`
A loaded JSON file containing data organized by system label
(e.g., `'h'`, `'mg'`, etc.). Under each system label is the individual
JSON dictionary of that state's calculation with the standard format of
`atoms.chrg.mult-pyscf-qcmethod.basis`; for example,
`'h.chrg-1.mult1-pyscf-ccsd.augccpvqz'`.
only_converged : :obj:`bool`, optional
Only include data where calculations converged. Defaults to ``True``.
Returns
-------
:obj:`pandas.core.frame.DataFrame`
A data frame with the following columns: system, atomic_numbers, charge,
multiplicity, n_electrons, qc_method, basis_set, lambda,
electronic_energy, hf_energy, and correlation_energy.
"""
prelim_df = []
for system_label in json_dict.keys():
for state_label in json_dict[system_label].keys():
for calc_label in json_dict[system_label][state_label].keys():
if 'electronic_energies' in json_dict[system_label][state_label][calc_label].keys():
calc_data = json_dict[system_label][state_label][calc_label]
prelim_df.extend(
_json_parse_qc(
system_label, calc_data, only_converged=only_converged
)
)
else:
for calc_label2 in json_dict[system_label][state_label][calc_label].keys():
calc_data = json_dict[system_label][state_label][calc_label][calc_label2]
prelim_df.extend(
_json_parse_qc(
system_label, calc_data, only_converged=only_converged
)
)
return pd.DataFrame(prelim_df)
def hf_error(A, hf_energies, cardinals, alpha):
"""Evaluates the error of the HF extrapolation.
0 = [A exp(-alpha * sqrt(Y)) - E_scf^(Y)] - [A exp(-alpha * sqrt(X)) - E_scf^(X)]
Parameters
----------
A : :obj:`float`
A system dependent parameter (that will be fit).
hf_energies : :obj:`tuple` (:obj:`float`)
The small (X) to large (Y) basis set hf energies, respectively.
alpha : :obj:`float`
The basis-set dependent constant alpha.
cardinals : :obj:`tuple`
The X and Y cardinal numbers of the basis sets, respectively.
Returns
-------
:obj:`float`
Error in the hf extrapolation procedure in the `A` parameter.
"""
hf_x, hf_y = hf_energies
cardinal_x, cardinal_y = cardinals
error_x = ((A * exp(-alpha * sqrt(cardinal_x))) - hf_x)
error_y = ((A * exp(-alpha * sqrt(cardinal_y))) - hf_y)
error = error_y - error_x
return error
def extrapolate_hf(hf_energies, cardinals, alpha):
"""Extrapolates the HF energy from a post-HF energy.
Based on E_scf^(X) = E_scf^(infinity) + A exp(-alpha * sqrt(X))
where E_scf^(X) is the SCF energy of cardinal number X, E_scf^(infinity) is
the CBS extrapolated energy, A and alpha are constants. Typically, alpha is
a basis-set dependent constant and A needs to be fitted.
Since we have two energies, we can minimize E_scf^(Y) - E_scf^(X) =
A exp(-alpha * sqrt(Y)) - A exp(-alpha * sqrt(X)).
Parameters
----------
hf_energies : :obj:`tuple` (:obj:`float`)
The small (X) to large (Y) basis set hf energies, respectively.
cardinals : :obj:`tuple`
The X and Y cardinal numbers of the basis sets, respectively.
alpha : :obj:`float`
The basis-set dependent constant alpha.
Returns
-------
:obj:`float`
Extrapolated hf energy.
"""
roots = optimize.fsolve(
hf_error, 1, args=(hf_energies, cardinals, alpha)
)
A = roots[0]
cbs_hf = hf_energies[0] - A*exp(-alpha * sqrt(cardinals[0]))
return cbs_hf
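# Illustrative use (placeholder energies, not real data); alpha comes from the cbs_extrap_alphas
# lookup imported from qa_tools.utils, keyed by basis family and cardinal pair, e.g.:
# cbs_hf = extrapolate_hf((e_hf_tz, e_hf_qz), (3, 4), cbs_extrap_alphas['aug']['3/4'])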
def extrapolate_correlation(correlation_energies, cardinals, beta):
"""Extrapolates the correlation energy.
For more information, see Equation 2 in DOI: 10.1021/ct100396y.
Parameters
----------
correlation_energies : :obj:`tuple` (:obj:`float`)
The small (X) to large (Y) basis set correlation energies, respectively.
cardinals : :obj:`tuple`
The X and Y cardinal numbers of the basis sets, respectively.
beta : :obj:`float`
The basis-set dependent constant beta.
Returns
-------
:obj:`float`
Extrapolated correlation energy.
"""
correlation_x, correlation_y = correlation_energies
cardinal_x, cardinal_y = cardinals
numerator = (cardinal_x**beta * correlation_x) - (cardinal_y**beta * correlation_y)
denominator = cardinal_x**beta - cardinal_y**beta
cbs_correlation = numerator / denominator
return cbs_correlation
def add_cbs_extrap_qc_df(
df_qc, cbs_basis_key='aug', basis_set_lower='aug-cc-pVTZ',
basis_set_higher='aug-cc-pVQZ'):
"""Extrapolates post-HF energies and adds CBS rows.
Parameters
----------
df_qc : :obj:`pandas.DataFrame`
A dataframe with quantum chemistry calculation data.
cbs_basis_key : :obj:`str`, optional
Which basis set family to extrapolate. Must be keys of the extrapolation
dictionaries.
basis_set_lower : :obj:`str`, optional
The smaller basis set (with a lower cardinal number).
basis_set_higher : :obj:`str`, optional
The larger basis set (with a higher cardinal number).
Returns
-------
:obj:`pandas.DataFrame`
QC dataframe with CBS extrapolated data added.
"""
lower_cardinal = basis_cardinals[basis_set_lower]
upper_cardinal = basis_cardinals[basis_set_higher]
cbs_cardinal_key = f'{lower_cardinal}/{upper_cardinal}'
# Only extrapolate post-HF methods.
lower_df = df_qc[
(df_qc.basis_set == basis_set_lower)
& (df_qc.qc_method.transform(lambda x: x.lower()).isin(post_hf_methods))
]
df_cbs_prelim = []
for row_info in zip(
lower_df['system'], lower_df['charge'], lower_df['multiplicity'],
lower_df['qc_method'], lower_df['lambda_value'],
lower_df['hf_energy'], lower_df['correlation_energy'],
lower_df['triples_correction']
):
# Calculation with lower basis set.
system, charge, multiplicity, qc_method, lambda_value, \
hf_lower, correlation_lower, triples_lower = row_info
# Calculation with higher basis set.
calc_upper = df_qc.query(
'system == @system' \
'& charge == @charge' \
'& multiplicity == @multiplicity' \
'& qc_method == @qc_method' \
'& basis_set == @basis_set_higher' \
'& lambda_value == @lambda_value' \
)
if len(calc_upper) == 0:
# Assumes that there is a missing, possibly unconverged, calculation.
continue
else:
assert len(calc_upper) == 1
hf_upper = calc_upper.iloc[0]['hf_energy']
correlation_upper = calc_upper.iloc[0]['correlation_energy']
triples_upper = calc_upper.iloc[0]['triples_correction']
# CBS extrapolation
cbs_hf = extrapolate_hf(
(hf_lower, hf_upper),
(lower_cardinal, upper_cardinal),
cbs_extrap_alphas[cbs_basis_key][cbs_cardinal_key]
)
if np.isnan(correlation_lower) or np.isnan(correlation_upper):
cbs_correlation = np.nan
cbs_total = cbs_hf
else:
cbs_correlation = extrapolate_correlation(
(correlation_lower, correlation_upper),
(lower_cardinal, upper_cardinal),
cbs_extrap_betas[cbs_basis_key][cbs_cardinal_key]
)
cbs_total = cbs_hf + cbs_correlation
# Building CBS row.
cbs_calc = calc_upper.iloc[0].to_dict()
cbs_calc['basis_set'] = f'CBS-{cbs_basis_key}'
cbs_calc['electronic_energy'] = cbs_total
cbs_calc['hf_energy'] = cbs_hf
cbs_calc['correlation_energy'] = cbs_correlation
# Triples CBS extrapolation.
if not np.isnan(triples_lower) or not np.isnan(triples_upper):
cbs_triples = extrapolate_correlation(
(triples_lower, triples_upper),
(lower_cardinal, upper_cardinal),
cbs_extrap_betas[cbs_basis_key][cbs_cardinal_key]
)
cbs_calc['triples_correction'] = cbs_triples
df_cbs_prelim.append(cbs_calc)
df_cbs_prelim = pd.DataFrame(df_cbs_prelim)
df_cbs = df_qc.append(df_cbs_prelim)
return df_cbs
def qats_dframe(json_dict):
"""Prepares a Pandas dataframe of QATS-relevant data.
Parameters
----------
json_dict : :obj:`dict`
A loaded JSON file containing data organized by system label
(e.g., `'h'`, `'mg'`, etc.). Under each system label is the state_label
that specifies charge and multiplicity (e.g., `'c.charg0.mult3'`).
Nested under that are individual JSON dictionaries of that state's
calculation with the standard format of
`atoms.chrg.mult-pyscf-qcmethod.basis`; for example,
`'h.chrg-1.mult1-pyscf-ccsd.augccpvqz'`.
Returns
-------
:obj:`pandas.DataFrame`
A dataframe with the following columns: system, atomic_numbers, charge,
multiplicity, n_electrons, qc_method, basis_set, lambda_range,
finite_diff_delta, finite_diff_acc, poly_coeff.
"""
prelim_df = []
# Loops through every system.
for system_label in json_dict.keys():
# Loops through every state.
for state_label in json_dict[system_label].keys():
# Loops through every calculation.
for calc_label in json_dict[system_label][state_label].keys():
if 'qats_poly_coeffs' in json_dict[system_label][state_label][calc_label].keys():
calc_data = json_dict[system_label][state_label][calc_label]
prelim_df.extend(_json_parse_qats(system_label, calc_data))
else:
for calc_label2 in json_dict[system_label][state_label][calc_label].keys():
calc_data = json_dict[system_label][state_label][calc_label][calc_label2]
prelim_df.extend(_json_parse_qats(system_label, calc_data))
return | pd.DataFrame(prelim_df) | pandas.DataFrame |
import argparse
import logging
import time
import pandas as pd
import jinja2
import time
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.sans-serif'] = [u'Songti SC']
plt.rcParams['axes.unicode_minus'] = False
parser = argparse.ArgumentParser()
parser.add_argument('--drop', nargs='?', const=1, type=bool, default=False)
args = parser.parse_args()
def logger():
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT)
def to_report(report):
    # Task categories recorded in the weekly reports, with translations:
    # 售前支持 - 技术方案    (presales - technical solution)
    # 售前支持 - 技术交流    (presales - technical exchange)
    # 售前支持 - PoC         (presales - proof of concept)
    # 售前支持 - 投标工作    (presales - bidding work)
    # 售前支持 - 市场推广    (presales - marketing / promotion)
    # 项目交付 - 项目管理    (project delivery - project management)
    # 内部工作 - 提供培训    (internal - delivering training)
    # 内部工作 - 参与培训    (internal - attending training)
    # 内部工作 - 技术突破    (internal - technical breakthrough work)
    # 内部工作 - 证书过审    (internal - certification review)
    # 内部工作 - 控标点过审  (internal - bid control point review)
    # 内部工作 - 合同过审    (internal - contract review)
    # 内部工作 - 团队管理    (internal - team management)
    # 内部工作 - 离职交接    (internal - offboarding handover)
    # 行政事务               (administrative work)
    # 休假                   (leave)
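    # Aggregate hours by presales person, by task category, and by project; each
    # breakdown is plotted as a horizontal bar chart and saved as an SVG file.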
all_presales = report.groupby([u'发起人'])[u'耗时'].sum().sort_values(ascending=True)
all_presales.index.name = None
all_presales.plot(kind='barh',figsize=(10,10),fontsize='15')
plt.savefig('all_presales.svg')
all_tasks = report.groupby(u'中类')[u'耗时'].sum().sort_values(ascending=True)
all_tasks.index.name = None
all_tasks.plot(kind='barh',figsize=(10,10),fontsize='15')
plt.savefig('all_tasks.svg')
all_projects = report.groupby(u'项目名称')[u'耗时'].sum().sort_values(ascending=True)
all_projects.index.name=None
all_projects.plot(kind='barh',figsize=(15,15),fontsize='15')
plt.savefig('all_projects.svg')
# Presales By Task Report
presale_by_task_report = pd.DataFrame(index=all_presales.index, columns=all_tasks.index)
presale_by_task_report=presale_by_task_report.fillna(0)
presale_by_task_report.index.name=None
for i in all_presales.index:
presale_tasks = report[report.发起人 == i].groupby([u'中类'])[u'耗时'].sum()
for j in all_tasks.index:
if j in presale_tasks.index:
presale_by_task_report[j][i]=presale_tasks[j]
ax=presale_by_task_report.plot.barh(stacked=True,figsize=(20,20),fontsize='15');
for i, v in enumerate(all_presales):
ax.text(v+1, i, str(v), color='black', fontweight='bold', fontsize=13)
plt.savefig('presale_by_task.svg')
# Project By Task Report
project_by_task_report = pd.DataFrame(index=all_projects.index, columns=all_tasks.index)
project_by_task_report=project_by_task_report.fillna(0)
project_by_task_report.index.name=None
for i in all_projects.index:
project_tasks = report[report.项目名称 == i].groupby([u'中类'])[u'耗时'].sum()
for j in all_tasks.index:
if j in project_tasks.index:
project_by_task_report[j][i]=project_tasks[j]
ax=project_by_task_report.plot.barh(stacked=True,figsize=(80,80),fontsize='50');
ax.legend(fontsize=35)
for i, v in enumerate(all_projects):
ax.text(v, i, str(v), color='black', fontweight='bold', fontsize=45)
plt.savefig('project_by_task.svg')
# Project By Presale Report
project_by_presale_report = pd.DataFrame(index=all_projects.index, columns=all_presales.index)
project_by_presale_report = project_by_presale_report.fillna(0)
project_by_presale_report.index.name = None
for i in all_projects.index:
project_tasks = report[report.项目名称 == i].groupby([u'发起人'])[u'耗时'].sum()
for j in all_presales.index:
if j in project_tasks.index:
project_by_presale_report[j][i]=project_tasks[j]
ax=project_by_presale_report.plot.barh(stacked=True,figsize=(80,80),fontsize='50');
ax.legend(fontsize=35)
for i, v in enumerate(all_projects):
ax.text(v, i, str(v), color='black', fontweight='bold', fontsize=45)
plt.savefig('project_by_presale.svg')
# Project vs. Presales by Week Report
presales_count=report.groupby([u'周'])[u'发起人'].apply(lambda x:len(set(x)))
projects_count=report.groupby([u'周'])[u'项目名称'].apply(lambda x:len(set(x)))
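    # Weekly summary columns: 售前人数 = presales headcount, 项目个数 = number of distinct
    # projects, 人均支持项目数 = average projects supported per presales person.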
presale_vs_project_by_week_report = pd.DataFrame(index=presales_count.index, columns=['售前人数', '项目个数', '人均支持项目数'])
presale_vs_project_by_week_report=presale_vs_project_by_week_report.fillna(0)
presale_vs_project_by_week_report.index.name=None
presale_vs_project_by_week_report[u'售前人数']=presales_count
presale_vs_project_by_week_report[u'项目个数']=projects_count
presale_vs_project_by_week_report[u'人均支持项目数']=np.around(presale_vs_project_by_week_report[u'项目个数']/presale_vs_project_by_week_report[u'售前人数'], decimals=1)
# Echarts Template handling
env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=''))
template_echarts = env.get_template('template_echarts.html')
presale_by_task_chart_data = {
'legend': list(presale_by_task_report.columns.values),
'y_data': list(presale_by_task_report.index.values),
'series': []
}
for (column_name, column_data) in presale_by_task_report.iteritems():
presale_by_task_chart_data['series'].append({
'name':column_name,
'type': 'bar',
'stack': 'total',
'label': {
'show': 'true'
},
'emphasis': {
'focus': 'series'
},
'data':list(column_data)
})
project_by_task_chart_data = {
'legend': list(project_by_task_report.columns.values),
'y_data': list(project_by_task_report.index.values),
'series': []
}
for (column_name, column_data) in project_by_task_report.iteritems():
project_by_task_chart_data['series'].append({
'name':column_name,
'type': 'bar',
'stack': 'total',
'label': {
'show': 'true'
},
'emphasis': {
'focus': 'series'
},
'data':list(column_data)
})
project_by_presale_chart_data = {
'legend': list(project_by_presale_report.columns.values),
'y_data': list(project_by_presale_report.index.values),
'series': []
}
for (column_name, column_data) in project_by_presale_report.iteritems():
project_by_presale_chart_data['series'].append({
'name':column_name,
'type': 'bar',
'stack': 'total',
'label': {
'show': 'true'
},
'emphasis': {
'focus': 'series'
},
'data':list(column_data)
})
presale_vs_project_by_week_chart_data = {
'legend': list(presale_vs_project_by_week_report.columns.values),
'x_data': list(presale_vs_project_by_week_report.index.values),
'series': []
}
for (column_name, column_data) in presale_vs_project_by_week_report.iteritems():
if column_name == '人均支持项目数':
presale_vs_project_by_week_chart_data['series'].append({
'name':column_name,
'type': 'line',
'yAxisIndex': '1',
'label': {
'show': 'true'
},
'data':list(column_data)
})
else:
presale_vs_project_by_week_chart_data['series'].append({
'name':column_name,
'type': 'bar',
'label': {
'show': 'true'
},
'data':list(column_data)
})
html = template_echarts.render(
presale_by_task_chart_data = presale_by_task_chart_data,
project_by_task_chart_data = project_by_task_chart_data,
project_by_presale_chart_data = project_by_presale_chart_data,
presale_vs_project_by_week_chart_data = presale_vs_project_by_week_chart_data
)
# Write the Echarts HTML file
with open('report_echarts.html', 'w') as f:
f.write(html)
def main():
logger()
df = pd.DataFrame()
total_weekly_reports = | pd.read_excel("data/周报.xlsx", sheet_name="周报") | pandas.read_excel |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_theme(style="darkgrid")
def visualize_result(fractions, xlabel='year',ylabel='fraction [%]', title='\'distans\' used in ads Aug 15 - Sept 14'):
sns.set_theme(style="darkgrid")
df = pd.DataFrame(fractions.items(), columns=[xlabel,ylabel])
g = sns.relplot(x=xlabel, y=ylabel, kind="line", marker='o', data=df)
g.fig.autofmt_xdate()
plt.title(title)
plt.xticks(list(fractions.keys()))
plt.show()
def visualize_histogram(fractions, xlabel='kompetens', ylabel ='Del', title = 'Kompetenser'):
sns.set_theme(style="whitegrid")
df = pd.DataFrame(fractions, columns = [xlabel,ylabel])
sns.set_color_codes("pastel")
sns.barplot(x=ylabel, y=xlabel, data=df,
label=ylabel, color="b", orient="h")
plt.title(title)
sns.despine(left=True, bottom=True)
plt.show()
def visualize_correlation_matrix(data, xlabel = 'ssyk1', ylabel = 'ssyk2'):
df3 = | pd.DataFrame(data=data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""PyGraal_Livrable_2_ITERATION_1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1doBNy8ywSlzrGvYFNBsLZGvAUpFTYhcR
For this first iteration, we start from the dataset to which the following modifications were applied:
- Dropped the NaNs on the target variable ('Q5')
- Replaced NaNs with the mode for every variable corresponding to a single-choice question
- Encoded the multiple-choice question columns as 0/1 depending on NaN or value
- Restricted the dataset to the entries of professional respondents (having a specific job, excluding 'student', 'unemployed' and 'other')
"""
from google.colab import drive
drive.mount('/content/drive')
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_theme()
df = pd.read_csv('/content/drive/MyDrive/PyGraal/df_pro.csv', index_col=0)
"""# Analyse de la variable cible
Comme vu précédemment, la valeur cible est 'Q5' (poste actuellement occupé) et après un premier traitement, elle contient 10 valeurs uniques.
"""
plt.figure (figsize=(10,10))
plt.pie(df['Q5'].value_counts(),
autopct = lambda x: str(round(x, 2)) + '%',
labels=df['Q5'].value_counts().index,
pctdistance=0.7,
shadow =True,
colors = ['#dddddd', '#81d8d0','#ffff66', '#ff7f50',
'#a0db8e', '#c0d6e4', '#ffc0cb', '#ffa500', '#808080' , '#6897bb'])
plt.title("Distribution du panel de répondants par poste hors étudiant/autres/sans emploi",fontsize=15, fontweight='bold');
"""Le domaine de la Data est en constante évolution, ses métiers également. De ce fait, nous allons restreindre notre analyse au 5 principaux métiers, qui représentent à eux seuls près de 80% du panel professionnel interrogé.
Pour appuyer cette réflexion, nous nous sommes inspirés de cet article précisant qu'au sein même du métier de Data Scientist il y avait des différenciations:
https://www.journaldunet.com/solutions/dsi/1506951-de-l-avenir-du-metier-de-data-scientist-e/
## Filtre du data set sur les 5 principaux métiers
"""
#Top 5 job titles among the respondents
top_5 =df['Q5'].value_counts().head().index.tolist()
top_5
#Build df_top5
df_top5 = df[df['Q5'].isin (top_5)]
print('Notre dataset contient à présent', df_top5.shape[0], 'entrées et', df_top5.shape[1],'colonnes.')
"""Pour rappel, notre objectif est de créer un modèle capable de proposer un poste en fonction de compétences et d'outils associés.
Par conséquent, en analysant les questions posées, nous pouvons supprimer une partie des colonnes.
"""
#Quick look at the dataset
df.sample(2)
"""## Voici la liste des colonnes concernées et notre raisonnement:
## Colonnes à supprimer
Q1 -> Age -> Dans un souci d'éthique, cette variable ne peut-être un élément différenciant pour une suggestion de poste
Q2 -> Genre-> Dans un souci d'éthique, cette variable ne peut-être un élément différenciant pour une suggestion de poste
Q3 -> Pays -> Dans un souci d'éthique, cette variable ne peut-être un élément différenciant pour une suggestion de poste
Q4 -> Niveau d'études-> Dans un souci d'éthique, cette variable ne peut-être un élément différenciant pour une suggestion de poste
Q8 -> Langage de programmation que la personne recommanderait -> Il s'agit d'une recommandation donc point de vue subjectif et non d'une compétence liée à un poste précis
Q11 -> Computing platform -> Il s'agit d'une habitude donc point de vue subjectif et non d'une compétence liée à un poste précis
Q12 -> Hardware / GPU ? TPU -> Il s'agit d'une habitude donc point de vue subjectif et non d'une compétence liée à un poste précis
Q13 -> Nb fois TPU used -> Il s'agit d'une habitude donc point de vue subjectif et non d'une compétence liée à un poste précis
Q18 -> Computer vision methods -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q19 -> NLP Methods -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q20 taille entreprise-> Question liée à l'entreprise et non au poste du répondant
Q21 combien de personnes en data
Q22 ML implémenté
Q24 salaire
Q25 combien d’argent dépensé
27A -> cloud computing products -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
28A -> ML products -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q30 -> Big Data products -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q32 -> BI Tools -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q34-A -> Automated ML Tools -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q36 -> Plateforme de publication -> Il ne s'agit pas d'une compétence technique ou d'un outil lié au poste
Q37 -> Plateforme de formation -> Il ne s'agit pas d'une compétence technique ou d'un outil lié au poste
Q38 -> Primary Tools for Data Analysis -> Il s'agit d'une réponse par texte libre
Q39 -> media favori -> Il ne s'agit pas d'une compétence technique ou d'un outil lié au poste
Q26_B à Q35_B -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
## Suppression des colonnes questions B
"""
# 1) Select the question columns containing 'B:'
quest_B =[]
for i in df_top5.columns.tolist():
if 'B:' in i:
quest_B.append(i)
print('There are', len(quest_B), 'part-B question columns.')
quest_B
# 2) Drop these columns from the DataFrame
df_top5 = df_top5.drop(quest_B, axis=1)
print('Our dataset now contains', df_top5.shape[0], 'rows and', df_top5.shape[1], 'columns.')
# The 91 columns have indeed been dropped.
"""## Dropping the other columns
"""
# Find the columns with more than 2 distinct values
# quest_u holds the columns of the single-choice questions
# quest_m holds the columns of the multiple-choice questions
quest_u =[]
quest_m =[]
for i in df:
if len(df[i].unique())>2:
quest_u.append(i)
else:
quest_m.append(i)
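# Illustrative note (the exact column naming of this survey extract is an assumption here):
# a single-choice question such as 'Q6' has many distinct answer levels and therefore lands
# in quest_u, whereas a one-hot multiple-choice column only ever holds NaN or a single value
# and lands in quest_m. A quick sanity check of the split could be:
#   print(len(quest_u), len(quest_m))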
# Build the list of columns to drop
col_to_drop =[]
for i in ['Q1', 'Q2', 'Q3', 'Q4', 'Q8',
'Q11', 'Q12', 'Q13', 'Q18', 'Q19',
'Q20', 'Q21', 'Q22', 'Q24', 'Q25', 'Q27A', 'Q28A',
'Q34A', 'Q36', 'Q37', 'Q38', 'Q39']:
if i not in col_to_drop:
if i in quest_u:
col_to_drop.append(i)
else:
for j in quest_m:
if i in j:
col_to_drop.append(j)
print('Number of columns to drop:', len(col_to_drop))
print(col_to_drop)
# Drop these columns and create a new df
df_clean = df_top5.drop(col_to_drop,axis=1)
print('Our dataset now contains', df_clean.shape[0], 'rows and', df_clean.shape[1], 'columns.')
df_clean.sample(2)
"""## Encodage des colonnes restantes:
L'ensemble des questions à choix multiple a déjà été traité précédemment.
Il nous reste à encoder Q6 et Q15.
"""
#Q6 Années d'expérience en programmation est une variable ordinale => encodage de 0 à 6
print(df_clean['Q6'].unique().tolist())
df_clean['Q6'] = df_clean['Q6'].replace(['I have never written code', '< 1 years', '1-2 years', '3-5 years', '5-10 years', '10-20 years', '20+ years'], [0,1,2,3,4,5,6])
# Check Q6
print(df_clean['Q6'].unique().tolist())
# Q15 (years of machine learning experience) is an ordinal variable => encode from 0 to 8
print(df_clean['Q15'].unique().tolist())
df_clean['Q15'] = df_clean['Q15'].replace(['I do not use machine learning methods','Under 1 year', '1-2 years','2-3 years','3-4 years','4-5 years','5-10 years','10-20 years','20 or more years'], [0,1,2,3,4,5,6,7,8])
# Check Q15
print(df_clean['Q15'].unique().tolist())
df_clean.sample(2)
# Rework the column names to remove spaces and special characters
#df_clean = df_clean.rename(columns=lambda x: x.replace(' ', '_'))
#df_clean = df_clean.rename(columns=lambda x: x.replace(':', '_'))
"""# Itération 1: Choix du modèle d'apprentissage"""
#Création du vecteur 'target' contenant la variable cible 'Q5' et d'un Data Frame 'feats' contenant les différentes features.
target = df_clean['Q5']
feats=df_clean.drop('Q5', axis=1)
# Split the dataset into a train set and a test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(feats, target, test_size=0.2, random_state=200)
"""Notre variable cible étant une variable catégorielle composée de classes, nous utiliserons par la suite des modèles de classification. """
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
sc= StandardScaler()
X_train_scaled = sc.fit_transform(X_train)
X_test_scaled = sc.transform(X_test)
"""### Méthode Arbre de Décision"""
dtc = DecisionTreeClassifier()
dtc.fit(X_train_scaled, y_train)
dtc_y_pred = dtc.predict(X_test_scaled)
print('DecisionTree train set score:', round(dtc.score(X_train_scaled, y_train),3)*100,'%')
print('DecisionTree test set score:', round(dtc.score(X_test_scaled, y_test),5)*100,'%')
from sklearn.metrics import classification_report
print('Decision Tree classification report')
print(classification_report(y_test, dtc_y_pred))
print("Matrice de confusion de l'Arbre de Décision")
pd.crosstab(y_test, dtc_y_pred, rownames=['Classe réelle'], colnames=['Classe prédite'])
"""### Méthide de Régression Logistique"""
lr = LogisticRegression()
lr.fit(X_train_scaled, y_train)
lr_y_pred = lr.predict(X_test_scaled)
print('Logistic Regression train set score:', round(lr.score(X_train_scaled, y_train),4)*100,'%')
print('Logistic Regression test set score:', round(lr.score(X_test_scaled, y_test),3)*100,'%')
print('Logistic Regression classification report')
print(classification_report(y_test, lr_y_pred))
print("Logistic Regression confusion matrix")
| pd.crosstab(y_test, lr_y_pred, rownames=['Actual class'], colnames=['Predicted class']) | pandas.crosstab |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
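# Usage note: within these tests the helper expands a frame built from the observed group
# combinations onto every combination of the groupers' categories, e.g. (see test_observed
# further down)
#   expected = cartesian_product_for_groupers(expected, [cat1, cat2], list("AB"), fill_value=0)
# so that it can be compared against the output of a groupby with observed=False.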
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": | Index([0, 2], dtype="int64") | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 18 15:04:50 2018
@authors: a.pakbin, <NAME>
"""
import numpy as np
from copy import copy
import pandas as pd
pd.set_option('mode.chained_assignment', None)
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import random as rnd
from xgboost.sklearn import XGBClassifier
import sys
import os
import matplotlib.pyplot as plt
import re
def data_reader(data_address, file_name, non_attribute_column_names=None,label_column_name=None):
data=pd.read_csv(data_address+'/'+file_name)
if non_attribute_column_names:
columns_to_drop=list(set(non_attribute_column_names)-set([label_column_name]))
data=data.drop(columns_to_drop, axis=1)
return data
def matrix_partitioner(df, proportion, label=None):
number_of_ones=int(round(proportion*len(df)))
ones=np.ones(number_of_ones)
zeros=np.zeros(len(df)-number_of_ones)
ones_and_zeros=np.append(ones,zeros)
permuted=np.random.permutation(ones_and_zeros)
boolean_permuted=permuted>0
    if label is not None:
return [df[boolean_permuted].reset_index(),df[~boolean_permuted].reset_index(),label[boolean_permuted],label[~boolean_permuted]]
else:
return [df[boolean_permuted].reset_index(),df[~boolean_permuted].reset_index()]
def dataframe_partitioner(df, output_label, proportion):
y=df[output_label].values
X=df.drop([output_label], axis=1)
return matrix_partitioner(X,label=y,proportion=proportion)
def one_hot_detacher(X, categorical_column_names):
one_hot_column_names=list()
for categorical_column in categorical_column_names:
for column_name in X.columns:
if column_name.startswith(categorical_column):
one_hot_column_names.append(column_name)
one_hot=X[one_hot_column_names]
X.drop(one_hot_column_names, axis=1, inplace=True)
return [X, one_hot]
def one_hot_attacher(X, one_hot):
return X.join(one_hot)
def normalize(X, data_type, categorical_column_names, training_mean=None, training_std=None):
[X, one_hot]=one_hot_detacher(X, categorical_column_names)
if data_type=='train_set':
mean=np.mean(X,axis=0)
        std=np.std(X, axis=0)
elif data_type=='test_set':
mean=training_mean
std=training_std
aux_std=copy(std)
aux_std[aux_std==0]=1
normalized=(X-mean)/aux_std
complete_normalized=one_hot_attacher(normalized, one_hot)
if data_type=='train_set':
return [complete_normalized, mean, std]
elif data_type=='test_set':
return complete_normalized
def train_test_normalizer(X_train, X_test, categorical_column_names):
[X_TRAIN_NORMALIZED, X_TRAIN_MEAN, X_TRAIN_STD]=normalize(X=X_train, data_type='train_set', categorical_column_names=categorical_column_names)
X_TEST_NORMALIZED=normalize(X=X_test, data_type='test_set', categorical_column_names=categorical_column_names, training_mean=X_TRAIN_MEAN, training_std=X_TRAIN_STD)
return [X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]
def possible_values_finder(data, categorical_column_names):
column_dict = dict()
for categorical_column_name in categorical_column_names:
unique_vals = list(set([str(x) for x in data[categorical_column_name].unique()])-set(['nan','NaN','NAN','null']))
column_dict[categorical_column_name]=unique_vals
return column_dict
def one_hot_encoder(X, categorical_column_names, possible_values):
for categorical_column_name in categorical_column_names:
possible_values_ = possible_values[categorical_column_name]
new_vals = [categorical_column_name + '_' + str(s) for s in possible_values_]
dummies = pd.get_dummies(X[categorical_column_name], prefix=categorical_column_name)
dummies = dummies.T.reindex(new_vals).T.fillna(0)
X = X.drop([categorical_column_name], axis=1)
X = X.join(dummies)
return X
def train_test_one_hot_encoder(X_train, X_test, categorical_column_names, possible_values):
X_TRAIN=one_hot_encoder(X_train, categorical_column_names, possible_values)
X_TEST=one_hot_encoder(X_test, categorical_column_names, possible_values)
return [X_TRAIN, X_TEST]
def categorical_distribution_finder(X, categorical_column_names):
NAMES=list()
DISTS=list()
for categorical_column_name in categorical_column_names:
names=list()
nom_of_all=0
quantity=list()
grouped= X.groupby([categorical_column_name])
for category, group in grouped:
names.append(category)
quantity.append(len(group))
nom_of_all=nom_of_all+len(group)
distribution = [float(x) / nom_of_all for x in quantity]
NAMES.append(names)
DISTS.append(distribution)
return(NAMES, DISTS)
def categorical_imputer(X, categorical_column_names, data_type='train', names=None, distributions=None):
if data_type=='train':
[names, distributions]=categorical_distribution_finder(X, categorical_column_names)
for idx, categorical_column_name in enumerate(categorical_column_names):
for i in range(0, len(X)):
if pd.isnull(X[categorical_column_name].iloc[i]):
X[categorical_column_name].iloc[i]=np.random.choice(names[idx], p=distributions[idx])
if data_type=='train':
return [X, names, distributions]
elif data_type=='test':
return X
def numerical_imputer(X, training_mean=None):
if training_mean is None:
training_mean=X.mean()
imputed=X.fillna(training_mean)
return [imputed, training_mean]
else:
imputed=X.fillna(training_mean)
return imputed
#
# X_train and X_test are data-frames of MIMIC3 data with certain columns dropped
# - the numerical imputation is straightforward: any missing values are replaced
# with the mean value for that column
#
def train_test_imputer(X_train, X_test, categorical_column_names):
[X_TRAIN_CAT_IMPUTED, NAMES, DISTS]=categorical_imputer(X_train, categorical_column_names)
X_TEST_CAT_IMPUTED=categorical_imputer(X_test, categorical_column_names, 'test', NAMES, DISTS)
[X_TRAIN_IMPUTED, X_TRAIN_MEAN]=numerical_imputer(X_TRAIN_CAT_IMPUTED)
X_TEST_IMPUTED=numerical_imputer(X_TEST_CAT_IMPUTED, X_TRAIN_MEAN)
return [X_TRAIN_IMPUTED, X_TEST_IMPUTED]
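#
# Usage sketch (illustrative column names, not necessarily those of the real MIMIC extract):
#
#   cat_cols = ['GENDER', 'ADMISSION_TYPE']
#   [X_train_imp, X_test_imp] = train_test_imputer(X_train, X_test, cat_cols)
#
# Categorical gaps in the test set are filled by sampling from the *training* category
# distribution and numerical gaps are filled with the *training* column means only, so no
# information leaks from the test set into the imputation.
#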
def auc_calculator(model, X, y, num_of_folds):
auc_list=list()
    skf=StratifiedKFold(n_splits=num_of_folds, shuffle=True, random_state=rnd.randint(1, 10**6))
for train_index, test_index in skf.split(X,y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train, y_train)
predictions=model.predict_proba(X_test)[:,1]
try:
auc=roc_auc_score(y_true=y_test, y_score=predictions)
except ValueError:
print("Exception in roc_auc_score(): trying to ignore")
auc = 0
auc_list.append(auc)
return sum(auc_list)/len(auc_list)
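#
# Usage sketch (illustrative hyper-parameters): mean AUC of a candidate model under
# stratified k-fold cross-validation, with X a DataFrame and y a binary label array.
#
#   model = XGBClassifier(max_depth=3, n_estimators=100, learning_rate=0.1)
#   mean_auc = auc_calculator(model, X, y, num_of_folds=5)
#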
def grid_search(X, y, num_of_folds, verbose, first_dim, second_dim=None, third_dim=None, return_auc_values=False):
best_auc=0
best_auc_setting=None
auc_matrix=np.zeros((len(first_dim),len(second_dim),len(third_dim)))
for max_depth_index, max_depth in enumerate(first_dim):
for n_estimator_index, n_estimator in enumerate(second_dim):
for learning_rate_index, learning_rate in enumerate(third_dim):
model=XGBClassifier(max_depth=int(max_depth), n_estimators=int(n_estimator), learning_rate=learning_rate)
auc=auc_calculator(model, X, y, num_of_folds)
auc_matrix[max_depth_index, n_estimator_index, learning_rate_index]=auc
if auc>best_auc:
best_auc=auc
best_auc_setting=[max_depth,n_estimator,learning_rate]
if verbose==True:
sys.stdout.write('\r GRID SEARCHING XGB: progress: {0:.3f} % ...'.format(
(max_depth_index*(len(second_dim)*len(third_dim))+
n_estimator_index*(len(third_dim))+
learning_rate_index
+1)/(len(first_dim)*len(second_dim)*len(third_dim))*100))
print ('\n')
if return_auc_values:
return [best_auc_setting,auc_matrix]
else:
return best_auc_setting
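#
# Usage sketch (illustrative search ranges, not tuned values): the three dimensions map to
# XGBoost max_depth, n_estimators and learning_rate respectively.
#
#   best_setting = grid_search(X, y, num_of_folds=5, verbose=True,
#                              first_dim=[3, 5, 7],
#                              second_dim=[100, 200],
#                              third_dim=[0.05, 0.1])
#   # best_setting == [max_depth, n_estimators, learning_rate] of the highest mean CV AUC
#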
def vectors_to_csv(address, file_name, vector_one, label_one, vector_two=None, label_two=None,vector_three=None, label_three=None):
if vector_two is None:
df=pd.DataFrame(data={label_one:vector_one})
elif vector_three is None:
df=pd.DataFrame(data={label_one:vector_one, label_two:vector_two})
else:
df=pd.DataFrame(data={label_one:vector_one, label_two:vector_two, label_three:vector_three})
df.to_csv(address+'/'+file_name+'.csv')
def create_subfolder_if_not_existing(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def save_roc_curve(data_address, TPR, FPR, auc):
plt.figure()
plt.title('Receiver Operating Characteristic')
plt.plot(FPR, TPR, 'b', label = 'AUC = %0.2f' % auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
# plt.show()
plt.savefig(data_address)
plt.close()
def feature_importance_updator(accumulative_feature_importance, new_importance):
if accumulative_feature_importance is None:
return new_importance
else:
return accumulative_feature_importance+new_importance
def feature_importance_saver(address, col_names, accumulative_feature_importance, num_of_folds):
mean_feature_importances=accumulative_feature_importance/num_of_folds
DF=pd.DataFrame(data={'FEATURE': col_names, 'IMPORTANCE': mean_feature_importances})
DF.to_csv(address+'/'+'feature_importances.csv')
DF=DF.sort_values(by='IMPORTANCE', ascending=False).reset_index(drop=True)
DF.to_csv(address+'/'+'feature_importances_sorted.csv')
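#
# Note on the helper below (a reading of the intended behaviour, not documented in the source):
# the coarsened ICD9 code is one digit shorter than the codes stored in D_ICD_PROCEDURES, so
# each trailing digit 0-9 is appended in turn (10*code + one) and the first long title found
# in the lookup dictionary is returned, e.g. a coarsened 331 is tried as 3310, 3311, ... 3319.
#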
def first_matching_ICD9_finder(code, convertor_dict):
ones=range(0,10)
for one in ones:
try:
Matching_ICD9s_name=convertor_dict[10*code+one]
return Matching_ICD9s_name
except:
continue
return 'UNKNOWN'
def convert_ICD9_codes(features_list, conversion_tables_address):
ICD9Codes=pd.read_csv(conversion_tables_address+'/'+'D_ICD_PROCEDURES.csv.gz')
convertor_dict=dict(zip(ICD9Codes['ICD9_CODE'],ICD9Codes['LONG_TITLE']))
feature_names = ['ICD9_'+str(feature[5:])+'_'+ first_matching_ICD9_finder(int(feature[5:]), convertor_dict)
if feature.startswith('ICD9_')
else feature
for feature in features_list]
return feature_names
def convert_items_n_labitems(features_list, conversion_tables_address):
RE_INT = re.compile(r'^[-+]?([1-9]\d*|0)$')
df_D_ITEMS = pd.read_csv(conversion_tables_address+'/'+'D_ITEMS.csv.gz')
df_D_LABITEMS = pd.read_csv(conversion_tables_address+'/'+'D_LABITEMS.csv.gz')
df_items = pd.concat([df_D_ITEMS[['ITEMID','LABEL']], df_D_LABITEMS[['ITEMID','LABEL']]]).set_index('ITEMID')
feature_names = [df_items.loc[int(feature.split('_')[0])].LABEL+' ('+feature.split('_')[1] + ')'
if RE_INT.match(feature.split('_')[0])
else feature for feature in features_list ]
return feature_names
def convert_numbers_to_names(features_list, conversion_tables_address):
return convert_ICD9_codes(convert_items_n_labitems(features_list, conversion_tables_address), conversion_tables_address)
#
# Coarsens the ICD codes to a higher level
# by dropping the last code digit - but, it looks like there may be some
# issues with the original code as it treats the ICD codes as numbers rather
# than strings and so doesn't take into account the semantically meaningful
# leading and trailing zeros.
#
def ICD9_categorizer(X):
# Get a list of the ICD columns in input X
ICD9_COLUMN_NAMES=[col for col in X.columns if str(col).startswith('ICD9_')]
# Make a DF that holds the ICD codes only for input X (?)
ICD9_categorized=pd.DataFrame(index=range(0,len(X)), columns=['ICD9_'+str(x) for x in range(0,1000)]).fillna(0)
# For each ICD column name:
for ICD9_column_name in ICD9_COLUMN_NAMES:
# Discard the last digit in the code number by doing integer division by 10
index=int(int(ICD9_column_name[5:])/10)
FITTING_CATEGORY='ICD9_'+str(index)
ICD9_categorized[FITTING_CATEGORY]=ICD9_categorized[FITTING_CATEGORY]+X[ICD9_column_name]
X=X.drop(ICD9_COLUMN_NAMES, axis=1)
X=X.join(ICD9_categorized)
return X
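# Worked example (illustrative): a one-hot column 'ICD9_3311' is accumulated into the coarser
# bucket 'ICD9_331' because int(3311 / 10) == 331; several fine-grained codes can map to the
# same bucket, which is why the bucket columns are summed rather than overwritten.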
def save_fold_data(writing_dir, fold_number, icustay_id_train, X_TRAIN_NORMALIZED, y_train, icustay_id_test, X_TEST_NORMALIZED, y_test, convert_names, conversion_tables_address=None):
ICUSTAY_ID_TRAIN=pd.DataFrame(data={'ICUSTAY_ID': icustay_id_train})
Y_TRAIN=pd.DataFrame(data={'LABEL': y_train})
X_TRAIN_NORMALIZED=X_TRAIN_NORMALIZED.reset_index().drop(['index'],axis=1)
TRAINING= | pd.concat([ICUSTAY_ID_TRAIN, X_TRAIN_NORMALIZED, Y_TRAIN], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import numpy.random as nr
from scipy import stats
from scipy.stats.stats import pearsonr, spearmanr
from scipy.special import digamma
import scipy.spatial as ss
from scipy.spatial.ckdtree import cKDTree
from sklearn.neighbors import NearestNeighbors
from math import log, pi, hypot, fabs, sqrt
import utils.utils as utils
# === Correlation matrix ===
@utils.traceLogLight
def pearsonCorrelation(data):
""" Computes the Pearson's coefficient for every pair of variables provided
Parameters
----------
    data: list of lists where each list contains the observations of a single variable
          (all rows must have the same length; both discrete and continuous variables are accepted)
    Return
    ------
    result: correlation matrix along with the p-value of the significance test of the coefficients
significance level legend: 3(***) -> p-value<0.01 -> The coefficient is significant at 99%
2(**) -> p-value<0.05 ->The coefficient is significant at 95%
1(*) -> p-value<0.1 -> The coefficient is significant at 90%
"""
for i in range(len(data) - 1):
assert len(data[i]) == len(
data[i + 1]), "The provided samples should have the same length"
# transform the list of samples to dataframe
df = | pd.DataFrame(data) | pandas.DataFrame |
import numpy as np
import pandas as pd
import re
import math
import multiprocessing
from abc import ABC, abstractmethod
from sklearn import metrics
from bokeh.layouts import gridplot, layout
from bokeh import events
from sklearn.linear_model import LinearRegression
from bokeh.plotting import figure, output_notebook, show
from bokeh.models import ColumnDataSource, Circle, HoverTool, TapTool, LabelSet, Rect, LinearColorMapper, MultiLine, Patch, Patches, CustomJS, Text, Title
from itertools import product
from sklearn.model_selection import ParameterGrid
from sklearn import preprocessing
from itertools import combinations
from copy import deepcopy
from ..plot import scatter, scatterCI, boxplot, distribution, permutation_test, roc_cv, scatter_ellipse
from ..utils import color_scale, dict_perc, nested_getattr, dict_95ci, dict_median_scores
class BaseCrossVal(ABC):
"""Base class for crossval: kfold."""
@abstractmethod
def __init__(self, model, X, Y, param_dict, folds=10, n_mc=1, n_boot=0, n_cores=-1, ci=95, stratify=True):
# Store basic inputs
self.model = model
self.X = X
self.Y = Y
self.num_param = len(param_dict)
self.folds = folds
self.n_boot = n_boot
self.n_mc = n_mc
# Note; if self.mc is 0, change to 1
if self.n_mc == 0:
self.n_mc = 1
self.ci = ci
# Save param_dict
# Make sure each parameter is in a list
for key, value in param_dict.items():
if not isinstance(value, (list, tuple, np.ndarray)):
param_dict[key] = [value]
self.param_dict = param_dict
self.param_list = list(ParameterGrid(param_dict))
# Create a second dict, with parameters with more than 1 variable e.g. n_comp = [1, 2]
self.param_dict2 = {}
for key, value in param_dict.items():
if len(value) > 1:
self.param_dict2 = {**self.param_dict2, **{key: value}}
self.param_list2 = list(ParameterGrid(self.param_dict2))
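        # Illustrative example (hypothetical parameters): with
        #   param_dict = {"n_components": [1, 2, 3], "kernel": ["rbf"]}
        # every value is already a list, param_dict2 keeps only {"n_components": [1, 2, 3]}
        # (the keys actually being searched over), and param_list2 enumerates that grid.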
self.stratify = True
# if n_cores = -1, set n_cores to max_cores
max_num_cores = multiprocessing.cpu_count()
self.n_cores = n_cores
if self.n_cores > max_num_cores:
self.n_cores = -1
print("Number of cores set too high. It will be set to the max number of cores in the system.", flush=True)
if self.n_cores == -1:
self.n_cores = max_num_cores
print("Number of cores set to: {}".format(max_num_cores))
@abstractmethod
def calc_ypred(self):
"""Calculates ypred full and ypred cv."""
pass
@abstractmethod
def calc_stats(self):
"""Calculates binary statistics from ypred full and ypred cv."""
pass
def _format_table(self, stats_list):
"""Make stats pretty (pandas table -> proper names in columns)."""
table = pd.DataFrame(stats_list).T
param_list_string = []
for i in range(len(self.param_list)):
param_list_string.append(str(self.param_list[i]))
table.columns = param_list_string
return table
def run(self):
"""Runs all functions prior to plot."""
# Check that param_dict is not for epochs
# Epoch is a special case
print("Running ...")
check_epoch = []
for i in self.param_dict2.keys():
check_epoch.append(i)
if check_epoch == ["epochs"]:
# Get epoch max
epoch_list = []
for i in self.param_list2:
for k, v in i.items():
epoch_list.append(v)
# Print and Calculate
self.calc_ypred_epoch()
print("returning stats at 'x' epoch interval during training until epoch={}.".format(epoch_list[-1]))
else:
self.calc_ypred()
self.calc_stats()
print("Done!")
def plot_projections(self, components="all", label=None, size=12, scatter2=False, legend="all", plot="ci", meanfull=True, roc_title=False, orthog_line=True, grid_line=False, **kwargs):
if components == "all":
components = None
legend_scatter = False
legend_dist = False
legend_roc = False
if legend in [True,'all']:
legend_scatter = True
legend_dist = True
legend_roc = True
if legend in [False,'none',None]:
legend_scatter = False
legend_dist = False
legend_roc = False
if legend is "scatter":
legend_scatter = True
if legend is "dist":
legend_dist = True
if legend is "roc":
legend_roc = True
# if scatter_show == None:
# scatter_show = 0
# elif scatter_show == "None":
# scatter_show = 0
# elif scatter_show == "Inner":
# scatter_show = 1
# elif scatter_show == "Full":
# scatter_show = 2
# elif scatter_show == "CV":
# scatter_show = 3
# elif scatter_show == "All":
# scatter_show = 4
# else:
# raise ValueError("scatter has to be either 'None', 'Inner', 'Full', 'CV', 'All'")
if plot in ["ci", "CI"]:
plot_num = 0
elif plot in ["innerci", "MeanCI", "meanci"]:
plot_num = 1
elif plot in ["full", "FULL", "Full"]:
plot_num = 2
elif plot in ["CV", "cv", "Cv"]:
plot_num = 3
elif plot in ["all", "ALL", "All"]:
plot_num = 4
else:
raise ValueError("plot has to be either 'ci', 'meanci', 'full', 'cv', 'all'.")
# if param == None:
# p = -1
# else:
# try:
# p = np.where(np.array(self.param_list) == param)[0][0]
# except IndexError:
# p = -1
perfect_param = {}
for key,value in self.param_dict.items():
if len(value) > 1:
if key is "n_components" or key is "n_neurons":
perfect_param[key] = value[-1]
elif key in kwargs:
if kwargs[key] in value:
perfect_param[key] = kwargs[key]
else:
print("Check Again! Value '{}' entered for {} doesn't exist.".format(key, kwargs[key]))
perfect_param[key] = value[-1]
else:
perfect_param[key] = value[-1]
print("{} set to {}. To change this, enter the key/value as an input argument e.g. {}={}".format(key, value[-1], key, value[1]))
else:
perfect_param[key] = value[0]
p = np.where(np.array(perfect_param) == self.param_list)[0][0]
if self.model.__name__ == 'NN_SigmoidSigmoid' or self.model.__name__ == "NN_LinearSigmoid":
lv_name = "Neuron"
else:
lv_name = "LV"
x_scores_full = self.x_scores_full[p]
x_scores_cv = np.median(np.array(self.x_scores_cv[p]), axis=0)
x_scores_cvall = np.array(self.x_scores_cv[p])
pctvar_ = self.pctvar_[p]
y_loadings_ = self.y_loadings_[p][0]
scatterplot = scatter2
num_x_scores = len(x_scores_full.T)
sigmoid = False
if components == None:
components = np.array(range(num_x_scores)) + 1
else:
components = np.sort(np.array(components))
comb_x_scores = list(combinations(np.array(components) - 1, 2))
for i in components:
if i > num_x_scores:
raise ValueError("Component {} does not exist.".format(i))
order = np.argsort(pctvar_)[::-1]
y_loadings_ = y_loadings_[order]
x_scores_full = x_scores_full[:, order]
x_scores_cv = x_scores_cv[:, order]
pctvar_ = pctvar_[order]
# If there is only 1 component, Need to plot x_score vs. peak (as opposided to x_score[i] vs. x_score[j])
if len(components) == 1:
print('Components must be > 1 to plot projections')
else:
if num_x_scores == 1:
pass
else:
# Width/height of each scoreplot
width_height = int(950 / len(components))
circle_size_scoreplot = size / len(components)
label_font = str(13 - len(components)) + "pt"
title_font = str(13 - len(components)) + "pt"
# Create empty grid
grid = np.full((num_x_scores, num_x_scores), None)
# Append each scoreplot
for i in range(len(comb_x_scores)):
# Make a copy (as it overwrites the input label/group)
if label is None:
group_copy = self.Y.copy()
label_copy = pd.Series(self.Y, name='Class').apply(str)
else:
newlabel = np.array(label)
label_copy = deepcopy(label)
#group_copy = deepcopy(newlabel)
group_copy = self.Y.copy()
# Scatterplot
x, y = comb_x_scores[i]
xlabel = "{} {} ({:0.1f}%)".format(lv_name, x + 1, pctvar_[x])
ylabel = "{} {} ({:0.1f}%)".format(lv_name, y + 1, pctvar_[y])
gradient = y_loadings_[y] / y_loadings_[x]
x_full = x_scores_full[:, x].tolist()
y_full = x_scores_full[:, y].tolist()
x_cv = x_scores_cv[:, x].tolist()
y_cv = x_scores_cv[:, y].tolist()
x_orig = x_scores_full[:, x].tolist()
y_orig = x_scores_full[:, y].tolist()
max_range = max(np.max(np.abs(x_scores_full[:, x])), np.max(np.abs(x_scores_cv[:, y])))
new_range_min = -max_range - 0.05 * max_range
new_range_max = max_range + 0.05 * max_range
new_range = (new_range_min, new_range_max)
new_xrange = new_range
new_yrange = new_range
regY_full = self.Y
regX_full = np.array([x_full, y_full]).T
reg_stat = LinearRegression().fit(regX_full, regY_full)
#gradient = reg_stat.coef_[1] / reg_stat.coef_[0]
grid[y, x] = scatter_ellipse(x_orig, y_orig, x_cv, y_cv, label=label_copy, group=group_copy, title="", xlabel=xlabel, ylabel=ylabel, width=width_height, height=width_height, legend=legend_scatter, size=circle_size_scoreplot, label_font_size=label_font, hover_xy=False, xrange=new_xrange, yrange=new_yrange, gradient=gradient, ci95=True, scatterplot=scatterplot, extraci95_x=x_cv, extraci95_y=y_cv, extraci95=True, scattershow=plot_num, extraci95_x2=x_full, extraci95_y2=y_full, orthog_line=orthog_line, grid_line=grid_line, legend_title=True, font_size=label_font)
# Append each distribution curve
group_dist = np.concatenate((self.Y, (self.Y + 2)))
dist_label1 = np.array(label_copy[self.Y==0])[0]
dist_label2 = np.array(label_copy[self.Y==1])[0]
dist_label = [str(dist_label1), str(dist_label2)]
for i in components:
i = i - 1
score_dist = np.concatenate((x_scores_full[:, i], x_scores_cv[:, i]))
xlabel = "{} {} ({:0.1f}%)".format(lv_name, i + 1, pctvar_[i])
grid[i, i] = distribution(score_dist, group=group_dist, group_label=dist_label, kde=True, title="", xlabel=xlabel, ylabel="p.d.f.", width=width_height, height=width_height, label_font_size=label_font, sigmoid=sigmoid, legend=legend_dist, plot_num=plot_num, grid_line=grid_line, legend_title=True, font_size=label_font)
# Append each roc curve
for i in range(len(comb_x_scores)):
x, y = comb_x_scores[i]
idx_x = order[x]
idx_y = order[y]
# Get the optimal combination of x_scores based on rotation of y_loadings_
# theta = math.atan(1)
x_stat = x_scores_full[:, x]
y_stat = x_scores_full[:, y]
regY_stat = self.Y
regX_stat = np.array([x_stat, y_stat]).T
reg_stat = LinearRegression().fit(regX_stat, regY_stat)
grad_stat = reg_stat.coef_[1] / reg_stat.coef_[0]
theta = math.atan(grad_stat)
#ypred_stat = x_stat * math.cos(theta_stat) + y_stat * math.sin(theta_stat) # Optimal line
x_rotate = x_scores_full[:, x] * math.cos(theta) + x_scores_full[:, y] * math.sin(theta)
#x_rotate_boot = x_scores_cv[:, x] * math.cos(theta) + x_scores_cv[:, y] * math.sin(theta)
# gradient = y_loadings_[y] / y_loadings_[x]
# theta = math.atan(gradient)
# x_rotate = x_scores_full[:, x] * math.cos(theta) + x_scores_full[:, y] * math.sin(theta)
# x_rotate_boot = x_scores_cv[:, x] * math.cos(theta) + x_scores_cv[:, y] * math.sin(theta)
self.x_rotate = x_rotate
group_copy = self.Y.copy()
self.x_rotate_boot = []
for i in range(len(x_scores_cvall)):
x_rot = x_scores_cvall[i][:, idx_x] * math.cos(theta) + x_scores_cvall[i][:, idx_y] * math.sin(theta)
self.x_rotate_boot.append(x_rot)
self.x_rotate_boot = np.array(self.x_rotate_boot)
x_rotate_boot = self.x_rotate_boot
# Get Stat
# ROC Plot with x_rotate
# fpr, tpr, tpr_ci = roc_calculate(group_copy, x_rotate, bootnum=100)
# fpr_boot, tpr_boot, tpr_ci_boot = roc_calculate(group_copy, x_rotate_boot, bootnum=100)
# grid[x, y] = roc_plot(fpr, tpr, tpr_ci, width=width_height, height=width_height, xlabel="1-Specificity (LV{}/LV{})".format(x + 1, y + 1), ylabel="Sensitivity (LV{}/LV{})".format(x + 1, y + 1), legend=False, label_font_size=label_font, roc2=True, fpr2=fpr_boot, tpr2=tpr_boot, tpr_ci2=tpr_ci_boot)
grid[x, y] = roc_cv(x_rotate, x_rotate_boot, group_copy, width=width_height, height=width_height, xlabel="1-Specificity ({}{}/{}{})".format(lv_name, x + 1, lv_name, y + 1), ylabel="Sensitivity ({}{}/{}{})".format(lv_name, x + 1, lv_name, y + 1), legend=legend_roc, label_font_size=label_font, title_font_size=title_font, show_title=roc_title, plot_num=plot_num, grid_line=grid_line)
# Bokeh grid
fig = gridplot(grid.tolist())
output_notebook()
show(fig)
def plot(self, metric="r2q2", scale=1, color_scaling="tanh", rotate_xlabel=True, model="kfold", legend=True, color_beta=[10, 10, 10], ci=95, diff1_heat=True, style=1, method='absolute', alt=True, grid_line=False):
"""Create a full/cv plot using based on metric selected.
Parameters
----------
metric : string, (default "r2q2")
metric has to be either "r2q2", "auc", "acc", "f1score", "prec", "sens", or "spec".
"""
# Check model is parametric if using 'r2q2'
if metric == "r2q2":
if self.model.parametric is False:
print("metric changed from 'r2q2' to 'auc' as the model is non-parametric.")
metric = "auc"
# Plot based on the number of parameters
if len(self.param_dict2) == 1:
fig = self._plot_param1(metric=metric, scale=scale, rotate_xlabel=rotate_xlabel, model=model, legend=legend, ci=ci, method=method, style=style, alt=alt, grid_line=grid_line)
elif len(self.param_dict2) == 2:
fig = self._plot_param2(metric=metric, scale=scale, color_scaling=color_scaling, model=model, legend=legend, color_beta=color_beta, ci=ci, diff1_heat=diff1_heat, style=style, method=method, alt=alt, grid_line=grid_line)
else:
raise ValueError("plot function only works for 1 or 2 parameters, there are {}.".format(len(self.param_dict2)))
# Show plot
output_notebook()
show(fig)
def _plot_param1(self, metric="r2q2", scale=1, rotate_xlabel=True, model="kfold", title_align="center", legend=True, ci=95, method='absolute', style=0, alt=True, grid_line=False):
"""Used for plot function if the number of parameters is 1."""
size_a = 13
size_b = 10
if len(self.param_list) > 14:
size_a = size_a - 2
size_b = size_b - 2
# Get ci
if self.n_mc > 1:
std_list = []
for i in range(len(self.param_list)):
std_full_i = dict_perc(self.full_loop[i], ci=ci)
std_cv_i = dict_perc(self.cv_loop[i], ci=ci)
std_full_i = {k + "full": v for k, v in std_full_i.items()}
std_cv_i = {k + "cv": v for k, v in std_cv_i.items()}
std_cv_i["R²"] = std_full_i.pop("R²full")
std_cv_i["Q²"] = std_cv_i.pop("R²cv")
std_combined = {**std_full_i, **std_cv_i}
std_list.append(std_combined)
self.table_std = self._format_table(std_list) # Transpose, Add headers
self.table_std = self.table_std.reindex(index=np.sort(self.table_std.index))
# Choose metric to plot
metric_title = np.array(["ACCURACY", "AIC", "AUC", "BIC", "F1-SCORE", "PRECISION", "R²", "SENSITIVITY", "SPECIFICITY", "SSE"])
metric_list = np.array(["acc", "aic", "auc", "bic", "f1score", "prec", "r2q2", "sens", "spec", "sse"])
metric_idx = np.where(metric_list == metric)[0][0]
# get full, cv, and diff
full = self.table.iloc[2 * metric_idx + 1]
cv = self.table.iloc[2 * metric_idx]
diff = abs(full - cv)
full_text = self.table.iloc[2 * metric_idx + 1].name
cv_text = self.table.iloc[2 * metric_idx].name
if metric == "r2q2":
diff_text = "| R²-Q² |"
y_axis_text = "R² & Q²"
full_legend = "R²"
cv_legend = "Q²"
else:
diff_text = full_text[:-4] + "diff"
y_axis_text = full_text[:-4]
if model == "kfold":
full_legend = "FULL"
cv_legend = "CV"
else:
full_legend = "TRAIN"
cv_legend = "TEST"
full_text = full_text[:-4] + "train"
cv_text = full_text[:-5] + "test"
        if method == 'ratio':
diff = abs(1 - (cv / full))
if metric == "r2q2":
diff_text = "1 - (Q² / R²)"
else:
diff_text = "1 - (" + full_text[:-4] + "cv /" + full_text[:-4] + "full)"
if alt is True:
diff = abs((full - cv) / full)
if metric == "r2q2":
diff_text = "| (R² - Q²) / R² |"
else:
diff_text = "| (" + full_text[:-4] + "full - " + full_text[:-4] + "cv) / " + full_text[:-4] + "full |"
elif method in ['absolute', 'abs']:
pass
else:
raise ValueError("method needs to be 'absolute' or 'ratio'.")
# round full, cv, and diff for hovertool
full_hover = []
cv_hover = []
diff_hover = []
for j in range(len(full)):
full_hover.append("%.2f" % round(full[j], 2))
cv_hover.append("%.2f" % round(cv[j], 2))
diff_hover.append("%.2f" % round(diff[j], 2))
# get key, values (as string) from param_dict (key -> title, values -> x axis values)
for k, v in self.param_dict2.items():
key_title = k
key_xaxis = k
values = v
values_string = [str(i) for i in values]
values_string = []
for i in values:
if i == 0:
values_string.append(str(i))
elif 0.0001 > i:
values_string.append("%0.2e" % i)
elif 10000 < i:
values_string.append("%0.2e" % i)
else:
values_string.append(str(i))
# if parameter starts with n_ e.g. n_components change title to 'no. of components', xaxis to 'components'
if key_title.startswith("n_"):
key_xaxis = key_xaxis[2:]
key_xaxis = key_xaxis.title()
key_title = "no. of " + key_xaxis
else:
key_title = key_title.replace("_", " ")
key_title = key_title.title()
key_xaxis = key_title
# if key_xaxis.endswith("s") == True:
# key_xaxis = key_xaxis[:-1]
# store data in ColumnDataSource for Bokeh
data = dict(full=full, cv=cv, diff=diff, full_hover=full_hover, cv_hover=cv_hover, diff_hover=diff_hover, values_string=values_string)
source = ColumnDataSource(data=data)
# fig1_yrange = (min(diff) - max(0.1 * (min(diff)), 0.07), max(diff) + max(0.1 * (max(diff)), 0.07))
# fig1_xrange = (min(cv) - max(0.1 * (min(cv)), 0.07), max(cv) + max(0.1 * (max(cv)), 0.07))
fig1_title = diff_text + " vs. " + cv_text
# Plot width/height
width = int(485 * scale)
height = int(405 * scale)
# fig1_yrange = (min(diff) - max(0.1 * (min(diff)), 0.07), max(diff) + max(0.1 * (max(diff)), 0.07))
# fig1_xrange = (min(cv) - max(0.1 * (min(cv)), 0.07), max(cv) + max(0.1 * (max(cv)), 0.07))
# x_range=(min(cv_score) - 0.03, max(cv_score) + 0.03), y_range=(min(diff_score) - 0.03, max(diff_score) + 0.03)
# Figure 1 (DIFFERENCE (R2 - Q2) vs. Q2)
fig1 = figure(x_axis_label=cv_text, y_axis_label=diff_text, title=fig1_title, tools="tap,pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select", plot_width=width, plot_height=height, x_range=(min(cv) - 0.03, max(cv) + 0.03), y_range=(min(diff) - 0.03, max(diff) + 0.03))
# Figure 1: Add a line
fig1_line = fig1.line(cv, diff, line_width=3, line_color="black", line_alpha=0.25)
# Figure 1: Add circles (interactive click)
fig1_circ = fig1.circle("cv", "diff", size=size_a, alpha=0.7, color="green", source=source)
fig1_circ.selection_glyph = Circle(fill_color="green", line_width=2, line_color="black")
fig1_circ.nonselection_glyph.fill_color = "green"
fig1_circ.nonselection_glyph.fill_alpha = 0.4
fig1_circ.nonselection_glyph.line_color = "white"
# Figure 1: Add hovertool
fig1.add_tools(HoverTool(renderers=[fig1_circ], tooltips=[(key_xaxis, "@values_string"), (full_text, "@full_hover"), (cv_text, "@cv_hover"), (diff_text, "@diff_hover")]))
# Figure 1: Extra formating
fig1.axis.major_label_text_font_size = "8pt"
if metric is "r2q2" or metric is "auc":
fig1.title.text_font_size = "12pt"
fig1.xaxis.axis_label_text_font_size = "10pt"
fig1.yaxis.axis_label_text_font_size = "10pt"
else:
fig1.title.text_font_size = "10pt"
fig1.xaxis.axis_label_text_font_size = "9pt"
fig1.yaxis.axis_label_text_font_size = "9pt"
# Figure 2: full/cv
fig2_title = y_axis_text + " over " + key_title
fig2 = figure(x_axis_label=key_xaxis, y_axis_label=y_axis_text, title=fig2_title, plot_width=width, plot_height=height, x_range=pd.unique(values_string), tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select")
if style == 0:
# Figure 2: Add Confidence Intervals if n_mc > 1
if self.n_mc > 1:
# get full, cv, and diff
full_std = self.table_std.iloc[2 * metric_idx + 1]
cv_std = self.table_std.iloc[2 * metric_idx]
lower_ci_full = pd.Series(name=full_std.name, dtype="object")
upper_ci_full = pd.Series(name=full_std.name, dtype="object")
for key, values in full_std.iteritems():
lower_ci_full[key] = values[0]
upper_ci_full[key] = values[1]
lower_ci_cv = pd.Series(name=cv_std.name, dtype="object")
upper_ci_cv = pd.Series(name=cv_std.name, dtype="object")
for key, values in cv_std.iteritems():
lower_ci_cv[key] = values[0]
upper_ci_cv[key] = values[1]
# Plot as a patch
x_patch = np.hstack((values_string, values_string[::-1]))
y_patch_r2 = np.hstack((lower_ci_full, upper_ci_full[::-1]))
y_patch_q2 = np.hstack((lower_ci_cv, upper_ci_cv[::-1]))
fig2.patch(x_patch, y_patch_q2, alpha=0.10, color="blue")
# kfold monte-carlo does not have ci for R2
if model is not "kfold":
fig2.patch(x_patch, y_patch_r2, alpha=0.10, color="red")
# Figure 2: add full
fig2_line_full = fig2.line(values_string, full, line_color="red", line_width=2)
fig2_circ_full = fig2.circle("values_string", "full", line_color="red", fill_color="white", fill_alpha=1, size=8, source=source, legend=full_legend)
fig2_circ_full.selection_glyph = Circle(line_color="red", fill_color="white", line_width=2)
fig2_circ_full.nonselection_glyph.line_color = "red"
fig2_circ_full.nonselection_glyph.fill_color = "white"
fig2_circ_full.nonselection_glyph.line_alpha = 0.4
# Figure 2: add cv
fig2_line_cv = fig2.line(values_string, cv, line_color="blue", line_width=2)
fig2_circ_cv = fig2.circle("values_string", "cv", line_color="blue", fill_color="white", fill_alpha=1, size=8, source=source, legend=cv_legend)
fig2_circ_cv.selection_glyph = Circle(line_color="blue", fill_color="white", line_width=2)
fig2_circ_cv.nonselection_glyph.line_color = "blue"
fig2_circ_cv.nonselection_glyph.fill_color = "white"
fig2_circ_cv.nonselection_glyph.line_alpha = 0.4
elif style == 1:
# Figure 2: Add Confidence Intervals if n_mc > 1
if self.n_mc > 1:
# get full, cv, and diff
full_std = self.table_std.iloc[2 * metric_idx + 1]
cv_std = self.table_std.iloc[2 * metric_idx]
lower_ci_full = pd.Series(name=full_std.name, dtype="object")
upper_ci_full = pd.Series(name=full_std.name, dtype="object")
for key, values in full_std.iteritems():
lower_ci_full[key] = values[0]
upper_ci_full[key] = values[1]
lower_ci_cv = pd.Series(name=cv_std.name, dtype="object")
upper_ci_cv = pd.Series(name=cv_std.name, dtype="object")
for key, values in cv_std.iteritems():
lower_ci_cv[key] = values[0]
upper_ci_cv[key] = values[1]
# Plot as a patch
x_patch = np.hstack((values_string, values_string[::-1]))
y_patch_r2 = np.hstack((lower_ci_full, upper_ci_full[::-1]))
y_patch_q2 = np.hstack((lower_ci_cv, upper_ci_cv[::-1]))
fig2.patch(x_patch, y_patch_q2, alpha=0.10, color="green")
# kfold monte-carlo does not have ci for R2
if model is not "kfold":
fig2.patch(x_patch, y_patch_r2, alpha=0.10, color="green")
# Figure 2: add full
fig2_line_full = fig2.line(values_string, full, line_color="green", line_width=3, legend=full_legend)
fig2_circ_full = fig2.circle("values_string", "full", line_color="green", fill_color="white", fill_alpha=1, size=size_b, source=source)
fig2_circ_full.selection_glyph = Circle(line_color="green", fill_color="white", line_width=2)
fig2_circ_full.nonselection_glyph.line_color = "green"
fig2_circ_full.nonselection_glyph.fill_color = "white"
fig2_circ_full.nonselection_glyph.line_alpha = 0.4
# Figure 2: add cv
fig2_line_cv = fig2.line(values_string, cv, line_color="green", line_width=3, line_dash='dashed', legend=cv_legend)
fig2_circ_cv = fig2.circle("values_string", "cv", line_color="green", fill_color="white", fill_alpha=1, size=size_b, source=source)
fig2_circ_cv.selection_glyph = Circle(line_color="green", fill_color="white", line_width=2)
fig2_circ_cv.nonselection_glyph.line_color = "green"
fig2_circ_cv.nonselection_glyph.fill_color = "white"
fig2_circ_cv.nonselection_glyph.line_alpha = 0.4
else:
raise ValueError("Style needs to be 0 or 1.")
# Add hovertool and taptool
fig2.add_tools(HoverTool(renderers=[fig2_circ_full], tooltips=[(full_text, "@full_hover")], mode="vline"))
fig2.add_tools(HoverTool(renderers=[fig2_circ_cv], tooltips=[(cv_text, "@cv_hover")], mode="vline"))
fig2.add_tools(TapTool(renderers=[fig2_circ_full, fig2_circ_cv]))
# Figure 2: Extra formating
fig2.axis.major_label_text_font_size = "8pt"
if metric is "r2q2" or metric is "auc":
fig2.title.text_font_size = "12pt"
fig2.xaxis.axis_label_text_font_size = "10pt"
fig2.yaxis.axis_label_text_font_size = "10pt"
else:
fig2.title.text_font_size = "10pt"
fig2.xaxis.axis_label_text_font_size = "9pt"
fig2.yaxis.axis_label_text_font_size = "9pt"
# Rotate
if rotate_xlabel is True:
fig2.xaxis.major_label_orientation = np.pi / 2
# Figure 2: legend
if legend is True:
fig2.legend.visible = True
fig2.legend.location = "bottom_right"
else:
fig2.legend.visible = False
if grid_line == False:
fig1.xgrid.visible = False
fig1.ygrid.visible = False
fig2.xgrid.visible = False
fig2.ygrid.visible = False
# if legend == None or legend == False:
# fig2.legend.visible = False
# else:
# fig2.legend.location = legend
# fig2.legend.location = legend
# Hide legend if it is clicked
# def show_hide_legend(legend=fig2.legend[0]):
# legend.visible = not legend.visible
# print(py2js(show_hide_legend))
# fig2.js_on_event(events.DoubleTap, CustomJS.from_py_func(show_hide_legend))
# Center title
if title_align == "center":
fig1.title.align = "center"
fig2.title.align = "center"
# Create a grid and output figures
grid = np.full((1, 2), None)
grid[0, 0] = fig1
grid[0, 1] = fig2
fig = gridplot(grid.tolist(), merge_tools=True)
return fig
def _plot_param2(self, metric="r2q2", xlabel=None, orientation=0, alternative=False, scale=1, heatmap_xaxis_rotate=90, color_scaling="tanh", line=False, model="kfold", title_align="center", legend=True, color_beta=[10, 10, 10], ci=95, diff1_heat=True, style=1, method='ratio', alt=True, grid_line=False):
# legend always None
legend = None
# check color_beta
if type(color_beta) != list:
raise ValueError("color_beta needs to be a list of 3 values e.g. [10, 10, 10]")
if len(color_beta) != 3:
raise ValueError("color_beta needs to be a list of 3 values e.g. [10, 10, 10]")
        if method == 'ratio':
color_beta[2] = color_beta[2] / 10
elif method in ['absolute', 'abs']:
pass
else:
raise ValueError("method needs to be 'absolute' or 'ratio'.")
# Get ci
if self.n_mc > 1:
std_list = []
for i in range(len(self.param_list)):
std_full_i = dict_perc(self.full_loop[i], ci=ci)
std_cv_i = dict_perc(self.cv_loop[i], ci=ci)
std_full_i = {k + "full": v for k, v in std_full_i.items()}
std_cv_i = {k + "cv": v for k, v in std_cv_i.items()}
std_cv_i["R²"] = std_full_i.pop("R²full")
std_cv_i["Q²"] = std_cv_i.pop("R²cv")
std_combined = {**std_full_i, **std_cv_i}
std_list.append(std_combined)
self.table_std = self._format_table(std_list) # Transpose, Add headers
self.table_std = self.table_std.reindex(index=np.sort(self.table_std.index))
metric_list = np.array(["acc", "aic", "auc", "bic", "f1score", "prec", "r2q2", "sens", "spec", "sse"])
metric_idx = np.where(metric_list == metric)[0][0]
# get full, cv, and diff
full_score = self.table.iloc[2 * metric_idx + 1]
cv_score = self.table.iloc[2 * metric_idx]
diff_score = abs(full_score - cv_score)
full_title = self.table.iloc[2 * metric_idx + 1].name
cv_title = self.table.iloc[2 * metric_idx].name
diff_title = full_title[:-4] + "diff"
if diff1_heat == False:
diff_heat_title = diff_title
diff_heat_score = diff_score
else:
diff_heat_title = "1 - " + full_title[:-4] + "diff"
diff_heat_score = 1 - diff_score
y_axis_text = full_title[:-4]
        if metric == "r2q2":
full_title = 'R²'
cv_title = 'Q²'
diff_title = "| R² - Q² |"
if diff1_heat == False:
diff_heat_title = diff_title
else:
diff_heat_title = "1 - | R² - Q² |"
y_axis_text = "R² & Q²"
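        # Worked example (illustrative, not from the original code): with R²full = 0.95 and
        # Q²cv = 0.80, the 'absolute' difference is |0.95 - 0.80| = 0.15, the 'ratio'
        # difference is |1 - 0.80/0.95| ≈ 0.16, and the 'alt' form |(0.95 - 0.80)/0.95|
        # evaluates to the same ≈ 0.16.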
        if method == 'ratio':
diff_score = abs(1 - (cv_score / full_score))
diff_heat_score = abs(cv_score / full_score)
if metric == "r2q2":
diff_title = "| 1 - (Q² / R²) |"
diff_heat_title = "| Q² / R² |"
else:
diff_title = "| 1 - (" + full_title[:-4] + "cv /" + full_title[:-4] + "full) |"
diff_heat_title = "| " + full_title[:-4] + "cv /" + full_title[:-4] + "full" + " |"
if alt is True:
diff_score = abs((full_score - cv_score) / full_score)
diff_heat_score = abs((full_score - cv_score) / full_score)
if metric == "r2q2":
#diff_title = "| 1 - (Q² / R²) |"
diff_title = "| (R² - Q²) / R² |"
diff_heat_title = "| (R² - Q²) / R² |"
else:
#diff_title = "1 - (" + full_title[:-4] + "cv /" + full_title[:-4] + "full)"
diff_title = "| (" + full_title[:-4] + "full - " + full_title[:-4] + "cv) / " + full_title[:-4] + "full |"
#diff_heat_title = "| " + full_title[:-4] + "cv /" + full_title[:-4] + "full" + " |"
diff_heat_title = diff_title = "| (" + full_title[:-4] + "full - " + full_title[:-4] + "cv) / " + full_title[:-4] + "full |"
if model == "kfold":
full_legend = "FULL"
cv_legend = "CV"
else:
full_legend = "TRAIN"
cv_legend = "TEST"
full_title = full_title[:-4] + "train"
cv_title = full_title[:-5] + "test"
            if metric == "r2q2":
full_title = 'R²'
cv_title = 'Q²'
# round full, cv, and diff for hovertool
full_hover = []
cv_hover = []
diff_hover = []
for j in range(len(full_score)):
full_hover.append("%.2f" % round(full_score[j], 2))
cv_hover.append("%.2f" % round(cv_score[j], 2))
diff_hover.append("%.2f" % round(diff_score[j], 2))
# If n_mc > 1
if self.n_mc > 1:
# get full, cv, and diff
full_std = self.table_std.iloc[2 * metric_idx + 1]
cv_std = self.table_std.iloc[2 * metric_idx]
lower_ci_full = | pd.Series(name=full_std.name, dtype="object") | pandas.Series |
"""
Construct the graph representation of brain imaging and population graph
"""
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics.pairwise import cosine_similarity
def brain_graph(logs, atlas, path, data_folder):
if not os.path.exists(path):
os.makedirs(path)
# the global mean is not included in ho_labels.csv
atlas.loc[-1] = [3455, 'Background']
print(atlas.shape)
# label the regions as right/left/global mean
label = []
for e in atlas['area'].values:
if e.startswith('Left'):
label.append(0)
elif e.startswith('Right'):
label.append(1)
else:
label.append(-1)
atlas['label'] = label
atlas.sort_values('index', inplace=True)
atlas = atlas.reset_index().drop('level_0', axis=1)
###################
# Adjacent matrix #
###################
print('Processing the adjacent matrix...')
# now the index in [0, 110]
adj = np.zeros([111, 111])
not_right = [i for i in range(111) if atlas['label'][i] != 1]
not_left = [i for i in range(111) if atlas['label'][i] != 0]
not_gb = [i for i in range(111) if atlas['label'][i] != -1]
# Build the bipartite brain graph
for idx in range(111):
if atlas['label'][idx] == 0:
adj[idx, not_left] = 1
elif atlas['label'][idx] == 1:
adj[idx, not_right] = 1
elif atlas['label'][idx] == -1:
adj[idx, not_gb] = 1
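    # Illustrative consequence of the loop above: a left-hemisphere region (label 0) is
    # linked to every right-hemisphere and global-mean node but to no other left-hemisphere
    # node, and symmetrically for the right hemisphere, giving the bipartite-style graph.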
# now form the sparse adj matrix
# node id:[1, 111*871]
node_ids = np.array_split(np.arange(1, 111 * 871 + 1), 871)
adj_matrix = []
for i in range(871):
node_id = node_ids[i]
for j in range(111):
for k in range(111):
if adj[j, k]:
adj_matrix.append([node_id[j], node_id[k]])
# save sparse adj matrix
pd.DataFrame(adj_matrix).to_csv(os.path.join(path, 'ABIDE_A.txt'), index=False, header=False)
print('Done!')
###################
# Graph indicator #
###################
print('processing the graph indicator...')
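    # 871 subjects x 111 regions: nodes 1-111 belong to graph 1, nodes 112-222 to graph 2, etc.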
indicator = np.repeat(np.arange(1, 872), 111)
pd.DataFrame(indicator).to_csv(os.path.join(path, 'ABIDE_graph_indicator.txt'), index=False, header=False)
print('Done!')
###################
# Graph labels #
###################
print('processing the graph labels...')
graph_labels = logs[['label']]
graph_labels.to_csv(os.path.join(path, 'ABIDE_graph_labels.txt'), index=False, header=False)
print('Done!')
###################
# Node Attributes #
###################
print('processing the node attributes...')
# follow the order in log.csv
files = logs['file_name']
node_att = pd.DataFrame([])
for file in files:
file_path = os.path.join(data_folder, file)
# data collected from different site
# may have different time length (rows in the data file)
# Here I simply cut them off according to
# the shortest one, 78.
ho_rois = pd.read_csv(file_path, sep='\t').iloc[:78, :].T
node_att = pd.concat([node_att, ho_rois])
node_att.to_csv(os.path.join(path, 'ABIDE_node_attributes.txt'), index=False, header=False)
print('The shape of node attributes is (%d, %d)' % node_att.shape)
print('Done!')
###################
# Node labels #
###################
print('processing the node labels...')
    # Make sure all the downloaded files have the same column (brain regions) order
cols = list( | pd.read_csv(file_path, sep='\t') | pandas.read_csv |
from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import os
import tracemalloc
import pandas as pd
import matplotlib as mpl
import io
from skimage.transform import resize
import cv2
from astronomaly.base.base_dataset import Dataset
from astronomaly.base import logging_tools
mpl.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas # noqa: E402, E501
import matplotlib.pyplot as plt # noqa: E402
def convert_array_to_image(arr, plot_cmap='hot'):
"""
Function to convert an array to a png image ready to be served on a web
page.
Parameters
----------
arr : np.ndarray
Input image
Returns
-------
png image object
Object ready to be passed directly to the frontend
"""
with mpl.rc_context({'backend': 'Agg'}):
fig = plt.figure(figsize=(1, 1), dpi=4 * arr.shape[1])
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.imshow(arr, cmap=plot_cmap)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
plt.close(fig)
return output
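# Illustrative usage (not part of the original pipeline):
#   buf = convert_array_to_image(np.random.rand(64, 64), plot_cmap='viridis')
# buf is an io.BytesIO object whose bytes can be served directly as a PNG response.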
def apply_transform(cutout, transform_function):
"""
Applies the transform function(s) given at initialisation to the image.
Parameters
----------
cutout : np.ndarray
Cutout of image
Returns
-------
np.ndarray
Transformed cutout
"""
if transform_function is not None:
try:
len(transform_function)
new_cutout = cutout
for f in transform_function:
new_cutout = f(new_cutout)
cutout = new_cutout
except TypeError: # Simple way to test if there's only one function
cutout = transform_function(cutout)
return cutout
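# Illustrative usage: apply_transform(cutout, [np.nan_to_num, my_scaling_func]) applies each
# function in order ('my_scaling_func' is a hypothetical callable); a single callable, or
# None to leave the cutout untouched, is also accepted.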
class AstroImage:
def __init__(self, filenames, file_type='fits', fits_index=0, name=''):
"""
Lightweight wrapper for an astronomy image from a fits file
Parameters
----------
filenames : list of files
Filename of fits file to be read. Can be length one if there's only
one file or multiple if there are multiband images
fits_index : integer
Which HDU object in the list to work with
"""
print('Reading image data from %s...' % filenames[0])
self.filenames = filenames
self.file_type = file_type
self.metadata = {}
self.wcs = None
self.fits_index = fits_index
self.hdul_list = []
try:
for f in filenames:
hdul = fits.open(f, memmap=True)
self.hdul_list.append(hdul)
except FileNotFoundError:
            raise FileNotFoundError(f"File {f} not found")
# get a test sample
self.get_image_data(0, 10, 0, 10)
if len(name) == 0:
self.name = self._strip_filename()
else:
self.name = name
print('Done!')
def get_image_data(self, row_start, row_end, col_start, col_end):
"""Returns the image data from a fits HDUlist object
Parameters
        ----------
        row_start, row_end, col_start, col_end : int
            Pixel boundaries (start and end of the rows and columns) of the
            region of the image to read
Returns
-------
np.array
Image data
"""
images = []
rs = row_start
re = row_end
cs = col_start
ce = col_end
for hdul in self.hdul_list:
if self.fits_index is None:
for i in range(len(hdul)):
self.fits_index = i
# snap1 = tracemalloc.take_snapshot()
dat = hdul[self.fits_index].data
# snap2 = tracemalloc.take_snapshot()
# diff = snap2.compare_to(snap1, 'lineno')
# print(diff[0].size_diff)
if dat is not None:
if len(dat.shape) > 2:
dat = dat[0][0]
image = dat[rs:re, cs:ce]
break
self.metadata = dict(hdul[self.fits_index].header)
if self.wcs is None:
self.wcs = WCS(hdul[self.fits_index].header, naxis=2)
else:
dat = hdul[self.fits_index].data
if len(dat.shape) > 2:
dat = dat[0][0]
image = dat[rs:re, cs:ce]
if len(image.shape) > 2:
image = np.squeeze(image)
images.append(image)
if len(images) > 1:
# Should now be a 3d array with multiple channels
image = np.dstack(images)
self.metadata['NAXIS3'] = image.shape[-1]
else:
image = images[0] # Was just the one image
return image
def get_image_shape(self):
"""
Efficiently returns the shape of the image.
Returns
-------
tuple
Image shape
"""
return (self.metadata['NAXIS1'], self.metadata['NAXIS2'])
def clean_up(self):
"""
Closes all open fits files so they don't remain in memory.
"""
print("Closing Fits files...")
for hdul in self.hdul_list:
hdul.close()
logging_tools.log("Fits files closed successfully.")
print("Files closed.")
def _strip_filename(self):
"""
Tiny utility function to make a nice formatted version of the image
name from the input filename string
Returns
-------
string
Formatted file name
"""
s1 = self.filenames[0].split(os.path.sep)[-1]
# extension = s1.split('.')[-1]
return s1
def get_coords(self, x, y):
"""
Returns the RA and DEC coordinates for a given set of pixels.
Parameters
----------
x : int
x pixel value
y : y
y pixel value
Returns
-------
ra, dec
Sky coordinates
"""
return self.wcs.wcs_pix2world(x, y, 0)
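        # Note: the third argument (origin=0) means x and y are interpreted as
        # 0-based (numpy-style) pixel coordinates.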
class ImageDataset(Dataset):
def __init__(self, fits_index=None, window_size=128, window_shift=None,
display_image_size=128, band_prefixes=[], bands_rgb={},
transform_function=None, display_transform_function=None,
plot_square=False, catalogue=None,
plot_cmap='hot', **kwargs):
"""
Read in a set of images either from a directory or from a list of file
paths (absolute). Inherits from Dataset class.
Parameters
----------
filename : str
            If a single file (of any type) is to be read from, the path can be
given using this kwarg.
directory : str
A directory can be given instead of an explicit list of files. The
child class will load all appropriate files in this directory.
list_of_files : list
Instead of the above, a list of files to be loaded can be
explicitly given.
output_dir : str
The directory to save the log file and all outputs to. Defaults to
'./'
fits_index : integer, optional
If these are fits files, specifies which HDU object in the list to
work with
window_size : int, tuple or list, optional
The size of the cutout in pixels. If an integer is provided, the
cutouts will be square. Otherwise a list of
[window_size_x, window_size_y] is expected.
window_shift : int, tuple or list, optional
The size of the window shift in pixels. If the shift is less than
the window size, a sliding window is used to create cutouts. This
can be particularly useful for (for example) creating a training
set for an autoencoder. If an integer is provided, the shift will
be the same in both directions. Otherwise a list of
[window_shift_x, window_shift_y] is expected.
        display_image_size : int, optional
            The size of the image to be displayed on the
web page. If the image is smaller than this, it will be
interpolated up to the higher number of pixels. If larger, it will
be downsampled.
band_prefixes : list
Allows you to specify a prefix for an image which corresponds to a
band identifier. This has to be a prefix and the rest of the image
name must be identical in order for Astronomaly to detect these
images should be stacked together.
bands_rgb : Dictionary
Maps the input bands (in separate folders) to rgb values to allow
false colour image plotting. Note that here you can only select
three bands to plot although you can use as many bands as you like
in band_prefixes. The dictionary should have 'r', 'g' and 'b' as
keys with the band prefixes as values.
transform_function : function or list, optional
The transformation function or list of functions that will be
applied to each cutout. The function should take an input 2d array
(the cutout) and return an output 2d array. If a list is provided,
each function is applied in the order of the list.
catalogue : pandas.DataFrame or similar
A catalogue of the positions of sources around which cutouts will
be extracted. Note that a cutout of size "window_size" will be
extracted around these positions and must be the same for all
sources.
plot_square : bool, optional
If True this will add a white border indicating the boundaries of
the original cutout when the image is displayed in the webapp.
plot_cmap : str, optional
The colormap with which to plot the image
"""
super().__init__(fits_index=fits_index, window_size=window_size,
window_shift=window_shift,
display_image_size=display_image_size,
band_prefixes=band_prefixes, bands_rgb=bands_rgb,
transform_function=transform_function,
display_transform_function=display_transform_function,
plot_square=plot_square, catalogue=catalogue,
plot_cmap=plot_cmap,
**kwargs)
self.known_file_types = ['fits', 'fits.fz', 'fits.gz',
'FITS', 'FITS.fz', 'FITS.gz']
self.data_type = 'image'
images = {}
tracemalloc.start()
if len(band_prefixes) != 0:
# Get the matching images in different bands
bands_files = {}
for p in band_prefixes:
for f in self.files:
if p in f:
start_ind = f.find(p)
end_ind = start_ind + len(p)
flname = f[end_ind:]
if flname not in bands_files.keys():
bands_files[flname] = [f]
else:
bands_files[flname] += [f]
for k in bands_files.keys():
extension = k.split('.')[-1]
# print(k, extension)
if extension == 'fz' or extension == 'gz':
extension = '.'.join(k.split('.')[-2:])
if extension in self.known_file_types:
try:
astro_img = AstroImage(bands_files[k],
file_type=extension,
fits_index=fits_index,
name=k)
images[k] = astro_img
except Exception as e:
msg = "Cannot read image " + k + "\n \
Exception is: " + (str)(e)
logging_tools.log(msg, level="ERROR")
            # Also convert the rgb dictionary into an index dictionary
            # corresponding to the positions of the bands in band_prefixes
if len(bands_rgb) == 0:
self.bands_rgb = {'r': 0, 'g': 1, 'b': 2}
else:
self.bands_rgb = {}
for k in bands_rgb.keys():
band = bands_rgb[k]
ind = band_prefixes.index(band)
self.bands_rgb[k] = ind
else:
for f in self.files:
extension = f.split('.')[-1]
if extension == 'fz' or extension == 'gz':
extension = '.'.join(f.split('.')[-2:])
if extension in self.known_file_types:
try:
astro_img = AstroImage([f],
file_type=extension,
fits_index=fits_index)
images[astro_img.name] = astro_img
except Exception as e:
msg = "Cannot read image " + f + "\n \
Exception is: " + (str)(e)
logging_tools.log(msg, level="ERROR")
if len(list(images.keys())) == 0:
msg = "No images found, Astronomaly cannot proceed."
logging_tools.log(msg, level="ERROR")
raise IOError(msg)
try:
self.window_size_x = window_size[0]
self.window_size_y = window_size[1]
except TypeError:
self.window_size_x = window_size
self.window_size_y = window_size
# Allows sliding windows
if window_shift is not None:
try:
self.window_shift_x = window_shift[0]
self.window_shift_y = window_shift[1]
except TypeError:
self.window_shift_x = window_shift
self.window_shift_y = window_shift
else:
self.window_shift_x = self.window_size_x
self.window_shift_y = self.window_size_y
self.images = images
self.transform_function = transform_function
if display_transform_function is None:
self.display_transform_function = transform_function
else:
self.display_transform_function = display_transform_function
self.plot_square = plot_square
self.plot_cmap = plot_cmap
self.catalogue = catalogue
self.display_image_size = display_image_size
self.band_prefixes = band_prefixes
self.metadata = | pd.DataFrame(data=[]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from itertools import product
from sklearn.model_selection import TimeSeriesSplit
import vectorbt as vbt
from vectorbt.generic import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
df = pd.DataFrame({
'a': [1, 2, 3, 4, np.nan],
'b': [np.nan, 4, 3, 2, 1],
'c': [1, 2, np.nan, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
group_by = np.array(['g1', 'g1', 'g2'])
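# group_by maps columns 'a' and 'b' to group 'g1' and column 'c' to group 'g2'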
@njit
def i_or_col_pow_nb(i_or_col, x, pow):
return np.power(x, pow)
@njit
def pow_nb(x, pow):
return np.power(x, pow)
@njit
def nanmean_nb(x):
return np.nanmean(x)
@njit
def i_col_nanmean_nb(i, col, x):
return np.nanmean(x)
@njit
def i_nanmean_nb(i, x):
return np.nanmean(x)
@njit
def col_nanmean_nb(col, x):
return np.nanmean(x)
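# These @njit helpers mirror the call signatures used by the vectorbt accessors tested below:
# element-wise apply functions take (i, col, value), column reducers take (col, array), and
# the on_matrix variants take the row index plus the whole 2-d window.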
# ############# accessors.py ############# #
class TestAccessors:
def test_shuffle(self):
pd.testing.assert_series_equal(
df['a'].vbt.shuffle(seed=seed),
pd.Series(
np.array([2.0, np.nan, 3.0, 1.0, 4.0]),
index=df['a'].index,
name=df['a'].name
)
)
np.testing.assert_array_equal(
df['a'].vbt.shuffle(seed=seed).values,
nb.shuffle_1d_nb(df['a'].values, seed=seed)
)
pd.testing.assert_frame_equal(
df.vbt.shuffle(seed=seed),
pd.DataFrame(
np.array([
[2., 2., 2.],
[np.nan, 4., 1.],
[3., 3., 2.],
[1., np.nan, 1.],
[4., 1., np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_value",
[-1, 0., np.nan],
)
def test_fillna(self, test_value):
pd.testing.assert_series_equal(df['a'].vbt.fillna(test_value), df['a'].fillna(test_value))
pd.testing.assert_frame_equal(df.vbt.fillna(test_value), df.fillna(test_value))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.bshift(test_n), df['a'].shift(-test_n))
np.testing.assert_array_equal(
df['a'].vbt.bshift(test_n).values,
nb.bshift_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.bshift(test_n), df.shift(-test_n))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.fshift(test_n), df['a'].shift(test_n))
np.testing.assert_array_equal(
df['a'].vbt.fshift(test_n).values,
nb.fshift_1d_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.fshift(test_n), df.shift(test_n))
def test_diff(self):
pd.testing.assert_series_equal(df['a'].vbt.diff(), df['a'].diff())
np.testing.assert_array_equal(df['a'].vbt.diff().values, nb.diff_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.diff(), df.diff())
def test_pct_change(self):
pd.testing.assert_series_equal(df['a'].vbt.pct_change(), df['a'].pct_change(fill_method=None))
np.testing.assert_array_equal(df['a'].vbt.pct_change().values, nb.pct_change_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.pct_change(), df.pct_change(fill_method=None))
def test_ffill(self):
pd.testing.assert_series_equal(df['a'].vbt.ffill(), df['a'].ffill())
pd.testing.assert_frame_equal(df.vbt.ffill(), df.ffill())
def test_product(self):
assert df['a'].vbt.product() == df['a'].product()
np.testing.assert_array_equal(df.vbt.product(), df.product())
def test_cumsum(self):
pd.testing.assert_series_equal(df['a'].vbt.cumsum(), df['a'].cumsum())
pd.testing.assert_frame_equal(df.vbt.cumsum(), df.cumsum())
def test_cumprod(self):
pd.testing.assert_series_equal(df['a'].vbt.cumprod(), df['a'].cumprod())
pd.testing.assert_frame_equal(df.vbt.cumprod(), df.cumprod())
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_min(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_min(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window),
df.rolling(test_window).min()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_max(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_max(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window),
df.rolling(test_window).max()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_mean(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_mean(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window),
df.rolling(test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [0, 1]))
)
def test_rolling_std(self, test_window, test_minp, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df['a'].rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df.rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window),
df.rolling(test_window).std()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust",
list(product([1, 2, 3, 4, 5], [1, None], [False, True]))
)
def test_ewm_mean(self, test_window, test_minp, test_adjust):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window),
df.ewm(span=test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [False, True], [0, 1]))
)
def test_ewm_std(self, test_window, test_minp, test_adjust, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window),
df.ewm(span=test_window).std()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_min(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_min(minp=test_minp),
df['a'].expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(minp=test_minp),
df.expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(),
df.expanding().min()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_max(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_max(minp=test_minp),
df['a'].expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(minp=test_minp),
df.expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(),
df.expanding().max()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_mean(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_mean(minp=test_minp),
df['a'].expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(minp=test_minp),
df.expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(),
df.expanding().mean()
)
@pytest.mark.parametrize(
"test_minp,test_ddof",
list(product([1, 3], [0, 1]))
)
def test_expanding_std(self, test_minp, test_ddof):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df['a'].expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df.expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(),
df.expanding().std()
)
def test_apply_along_axis(self):
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=0),
df.apply(pow_nb, args=(2,), axis=0, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=1),
df.apply(pow_nb, args=(2,), axis=1, raw=True)
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_apply(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb),
df.rolling(test_window).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(3, i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[2.75, 2.75, 2.75],
[np.nan, np.nan, np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_apply(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df['a'].expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df.expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb),
df.expanding().apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[2.0, 2.0, 2.0],
[2.2857142857142856, 2.2857142857142856, 2.2857142857142856],
[2.4, 2.4, 2.4],
[2.1666666666666665, 2.1666666666666665, 2.1666666666666665]
]),
index=df.index,
columns=df.columns
)
)
def test_groupby_apply(self):
pd.testing.assert_series_equal(
df['a'].vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df['a'].groupby(np.asarray([1, 1, 2, 2, 3])).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df.groupby(np.asarray([1, 1, 2, 2, 3])).agg({
'a': lambda x: nanmean_nb(x.values),
'b': lambda x: nanmean_nb(x.values),
'c': lambda x: nanmean_nb(x.values)
}), # any clean way to do column-wise grouping in pandas?
)
def test_groupby_apply_on_matrix(self):
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2., 2., 2.],
[2.8, 2.8, 2.8],
[1., 1., 1.]
]),
index=pd.Int64Index([1, 2, 3], dtype='int64'),
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_freq",
['1h', '3d', '1w'],
)
def test_resample_apply(self, test_freq):
pd.testing.assert_series_equal(
df['a'].vbt.resample_apply(test_freq, i_col_nanmean_nb),
df['a'].resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply(test_freq, i_col_nanmean_nb),
df.resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply('3d', i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2.28571429, 2.28571429, 2.28571429],
[2., 2., 2.]
]),
index=pd.DatetimeIndex(['2018-01-01', '2018-01-04'], dtype='datetime64[ns]', freq='3D'),
columns=df.columns
)
)
def test_applymap(self):
@njit
def mult_nb(i, col, x):
return x * 2
pd.testing.assert_series_equal(
df['a'].vbt.applymap(mult_nb),
df['a'].map(lambda x: x * 2)
)
pd.testing.assert_frame_equal(
df.vbt.applymap(mult_nb),
df.applymap(lambda x: x * 2)
)
def test_filter(self):
@njit
def greater_nb(i, col, x):
return x > 2
pd.testing.assert_series_equal(
df['a'].vbt.filter(greater_nb),
df['a'].map(lambda x: x if x > 2 else np.nan)
)
pd.testing.assert_frame_equal(
df.vbt.filter(greater_nb),
df.applymap(lambda x: x if x > 2 else np.nan)
)
def test_apply_and_reduce(self):
@njit
def every_nth_nb(col, a, n):
return a[::n]
@njit
def sum_nb(col, a, b):
return np.nansum(a) + b
assert df['a'].vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)) == \
df['a'].iloc[::2].sum() + 3
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)),
df.iloc[::2].sum().rename('apply_and_reduce') + 3
)
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(
every_nth_nb, sum_nb, apply_args=(2,),
reduce_args=(3,), wrap_kwargs=dict(time_units=True)),
(df.iloc[::2].sum().rename('apply_and_reduce') + 3) * day_dt
)
def test_reduce(self):
@njit
def sum_nb(col, a):
return np.nansum(a)
assert df['a'].vbt.reduce(sum_nb) == df['a'].sum()
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb),
df.sum().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, wrap_kwargs=dict(time_units=True)),
df.sum().rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, group_by=group_by),
pd.Series([20.0, 6.0], index=['g1', 'g2']).rename('reduce')
)
@njit
def argmax_nb(col, a):
a = a.copy()
a[np.isnan(a)] = -np.inf
return np.argmax(a)
assert df['a'].vbt.reduce(argmax_nb, to_idx=True) == df['a'].idxmax()
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True),
df.idxmax().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True, flatten=True, group_by=group_by),
pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2']).rename('reduce')
)
@njit
def min_and_max_nb(col, a):
out = np.empty(2)
out[0] = np.nanmin(a)
out[1] = np.nanmax(a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([np.nanmin(df['a']), np.nanmax(df['a'])], index=['min', 'max'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
df.apply(lambda x: pd.Series(np.asarray([np.nanmin(x), np.nanmax(x)]), index=['min', 'max']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True, group_by=group_by,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame([[1.0, 1.0], [4.0, 2.0]], index=['min', 'max'], columns=['g1', 'g2'])
)
@njit
def argmin_and_argmax_nb(col, a):
# nanargmin and nanargmax
out = np.empty(2)
_a = a.copy()
_a[np.isnan(_a)] = np.inf
out[0] = np.argmin(_a)
_a = a.copy()
_a[np.isnan(_a)] = -np.inf
out[1] = np.argmax(_a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.Series([df['a'].idxmin(), df['a'].idxmax()], index=['idxmin', 'idxmax'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
df.apply(lambda x: pd.Series(np.asarray([x.idxmin(), x.idxmax()]), index=['idxmin', 'idxmax']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='C', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-02', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='F', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-04', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
def test_squeeze_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.squeeze_grouped(i_col_nanmean_nb, group_by=group_by),
pd.DataFrame([
[1.0, 1.0],
[3.0, 2.0],
[3.0, np.nan],
[3.0, 2.0],
[1.0, 1.0]
], index=df.index, columns=['g1', 'g2'])
)
def test_flatten_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='C'),
pd.DataFrame([
[1.0, 1.0],
[np.nan, np.nan],
[2.0, 2.0],
[4.0, np.nan],
[3.0, np.nan],
[3.0, np.nan],
[4.0, 2.0],
[2.0, np.nan],
[np.nan, 1.0],
[1.0, np.nan]
], index=np.repeat(df.index, 2), columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='F'),
pd.DataFrame([
[1.0, 1.0],
[2.0, 2.0],
[3.0, np.nan],
[4.0, 2.0],
[np.nan, 1.0],
[np.nan, np.nan],
[4.0, np.nan],
[3.0, np.nan],
[2.0, np.nan],
[1.0, np.nan]
], index=np.tile(df.index, 2), columns=['g1', 'g2'])
)
@pytest.mark.parametrize(
"test_name,test_func,test_func_nb",
[
('min', lambda x, **kwargs: x.min(**kwargs), nb.nanmin_nb),
('max', lambda x, **kwargs: x.max(**kwargs), nb.nanmax_nb),
('mean', lambda x, **kwargs: x.mean(**kwargs), nb.nanmean_nb),
('median', lambda x, **kwargs: x.median(**kwargs), nb.nanmedian_nb),
('std', lambda x, **kwargs: x.std(**kwargs, ddof=0), nb.nanstd_nb),
('count', lambda x, **kwargs: x.count(**kwargs), nb.nancnt_nb),
('sum', lambda x, **kwargs: x.sum(**kwargs), nb.nansum_nb)
],
)
def test_funcs(self, test_name, test_func, test_func_nb):
# numeric
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack()),
test_func(df['c'])
], index=['g1', 'g2']).rename(test_name)
)
np.testing.assert_array_equal(test_func(df).values, test_func_nb(df.values))
pd.testing.assert_series_equal(
test_func(df.vbt, wrap_kwargs=dict(time_units=True)),
test_func(df).rename(test_name) * day_dt
)
# boolean
bool_ts = df == df
assert test_func(bool_ts['a'].vbt) == test_func(bool_ts['a'])
pd.testing.assert_series_equal(
test_func(bool_ts.vbt),
test_func(bool_ts).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(bool_ts.vbt, wrap_kwargs=dict(time_units=True)),
test_func(bool_ts).rename(test_name) * day_dt
)
@pytest.mark.parametrize(
"test_name,test_func",
[
('idxmin', lambda x, **kwargs: x.idxmin(**kwargs)),
('idxmax', lambda x, **kwargs: x.idxmax(**kwargs))
],
)
def test_arg_funcs(self, test_name, test_func):
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack())[0],
test_func(df['c'])
], index=['g1', 'g2'], dtype='datetime64[ns]').rename(test_name)
)
def test_describe(self):
pd.testing.assert_series_equal(
df['a'].vbt.describe(),
df['a'].describe()
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=None),
df.describe(percentiles=None)
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=[]),
df.describe(percentiles=[])
)
test_against = df.describe(percentiles=np.arange(0, 1, 0.1))
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1)),
test_against
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1), group_by=group_by),
pd.DataFrame({
'g1': df[['a', 'b']].stack().describe(percentiles=np.arange(0, 1, 0.1)).values,
'g2': df['c'].describe(percentiles=np.arange(0, 1, 0.1)).values
}, index=test_against.index)
)
def test_drawdown(self):
pd.testing.assert_series_equal(
df['a'].vbt.drawdown(),
df['a'] / df['a'].expanding().max() - 1
)
pd.testing.assert_frame_equal(
df.vbt.drawdown(),
df / df.expanding().max() - 1
)
def test_drawdowns(self):
assert type(df['a'].vbt.drawdowns) is vbt.Drawdowns
assert df['a'].vbt.drawdowns.wrapper.freq == df['a'].vbt.wrapper.freq
assert df['a'].vbt.drawdowns.wrapper.ndim == df['a'].ndim
assert df.vbt.drawdowns.wrapper.ndim == df.ndim
def test_to_mapped_array(self):
np.testing.assert_array_equal(
df.vbt.to_mapped_array().values,
np.array([1., 2., 3., 4., 4., 3., 2., 1., 1., 2., 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().col_arr,
np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().idx_arr,
np.array([0, 1, 2, 3, 1, 2, 3, 4, 0, 1, 3, 4])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).values,
np.array([1., 2., 3., 4., np.nan, np.nan, 4., 3., 2., 1., 1., 2., np.nan, 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).col_arr,
np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).idx_arr,
np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
)
def test_zscore(self):
pd.testing.assert_series_equal(
df['a'].vbt.zscore(),
(df['a'] - df['a'].mean()) / df['a'].std(ddof=0)
)
pd.testing.assert_frame_equal(
df.vbt.zscore(),
(df - df.mean()) / df.std(ddof=0)
)
def test_split(self):
splitter = TimeSeriesSplit(n_splits=2)
(train_df, train_indexes), (test_df, test_indexes) = df['a'].vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, 1.0],
[2.0, 2.0],
[3.0, 3.0],
[np.nan, 4.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
train_indexes[i],
target[i]
)
pd.testing.assert_frame_equal(
test_df,
pd.DataFrame(
np.array([
[4.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
test_indexes[i],
target[i]
)
(train_df, train_indexes), (test_df, test_indexes) = df.vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 1.0, np.nan, 1.0],
[2.0, 4.0, 2.0, 2.0, 4.0, 2.0],
[3.0, 3.0, np.nan, 3.0, 3.0, np.nan],
[np.nan, np.nan, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.MultiIndex.from_tuples([
(0, 'a'),
(0, 'b'),
(0, 'c'),
(1, 'a'),
(1, 'b'),
(1, 'c')
], names=['split_idx', None])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
train_indexes[i],
target[i]
)
pd.testing.assert_frame_equal(
test_df,
pd.DataFrame(
np.array([
[4.0, 2.0, 2.0, np.nan, 1.0, 1.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.MultiIndex.from_tuples([
(0, 'a'),
(0, 'b'),
(0, 'c'),
(1, 'a'),
(1, 'b'),
(1, 'c')
], names=['split_idx', None])
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
test_indexes[i],
target[i]
)
def test_range_split(self):
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(n=2)[0],
pd.DataFrame(
np.array([
[1., 4.],
[2., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(n=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(range_len=2)[0],
pd.DataFrame(
np.array([
[1., 2., 3., 4.],
[2., 3., 4., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1, 2, 3], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None),
pd.DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', name='split_2', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_3', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(range_len=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(range_len=2, n=3)[0],
pd.DataFrame(
np.array([
[1., 3., 4.],
[2., 4., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1, 2], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', name='split_1', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_2', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(range_len=2, n=3)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(range_len=3, n=2)[0],
pd.DataFrame(
np.array([
[1., 3.],
[2., 4.],
[3., np.nan]
]),
index=pd.RangeIndex(start=0, stop=3, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-03', '2018-01-04', '2018-01-05'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(range_len=3, n=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df.vbt.range_split(n=2)[0],
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 4.0, 2.0, 2.0],
[2.0, 4.0, 2.0, np.nan, 1.0, 1.0]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.MultiIndex.from_arrays([
pd.Int64Index([0, 0, 0, 1, 1, 1], dtype='int64', name='split_idx'),
pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df.vbt.range_split(n=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df.vbt.range_split(start_idxs=[0, 1], end_idxs=[2, 3])[0],
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 2.0, 4.0, 2.0],
[2.0, 4.0, 2.0, 3.0, 3.0, np.nan],
[3.0, 3.0, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=3, step=1),
columns=pd.MultiIndex.from_arrays([
pd.Int64Index([0, 0, 0, 1, 1, 1], dtype='int64', name='split_idx'),
pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df.vbt.range_split(start_idxs=[0, 1], end_idxs=[2, 3])[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df.vbt.range_split(start_idxs=df.index[[0, 1]], end_idxs=df.index[[2, 3]])[0],
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 2.0, 4.0, 2.0],
[2.0, 4.0, 2.0, 3.0, 3.0, np.nan],
[3.0, 3.0, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=3, step=1),
columns=pd.MultiIndex.from_arrays([
pd.Int64Index([0, 0, 0, 1, 1, 1], dtype='int64', name='split_idx'),
pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df.vbt.range_split(start_idxs=df.index[[0, 1]], end_idxs=df.index[[2, 3]])[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df.vbt.range_split(start_idxs=df.index[[0]], end_idxs=df.index[[2, 3]])[0],
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 1.0, np.nan, 1.0],
[2.0, 4.0, 2.0, 2.0, 4.0, 2.0],
[3.0, 3.0, np.nan, 3.0, 3.0, np.nan],
[np.nan, np.nan, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.MultiIndex.from_arrays([
pd.Int64Index([0, 0, 0, 1, 1, 1], dtype='int64', name='split_idx'),
pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df.vbt.range_split(start_idxs=df.index[[0]], end_idxs=df.index[[2, 3]])[1][i],
target[i]
)
with pytest.raises(Exception) as e_info:
df.vbt.range_split()
with pytest.raises(Exception) as e_info:
df.vbt.range_split(start_idxs=[0, 1])
with pytest.raises(Exception) as e_info:
df.vbt.range_split(end_idxs=[2, 4])
with pytest.raises(Exception) as e_info:
df.vbt.range_split(min_len=10)
with pytest.raises(Exception) as e_info:
df.vbt.range_split(n=10)
def test_rolling_split(self):
(df1, indexes1), (df2, indexes2), (df3, indexes3) = df['a'].vbt.rolling_split(
window_len=4, set_lens=(1, 1), left_to_right=False)
pd.testing.assert_frame_equal(
df1,
pd.DataFrame(
np.array([
[1.0, 2.0],
[2.0, 3.0]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes1[i],
target[i]
)
pd.testing.assert_frame_equal(
df2,
pd.DataFrame(
np.array([
[3.0, 4.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-03'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes2[i],
target[i]
)
pd.testing.assert_frame_equal(
df3,
pd.DataFrame(
np.array([
[4.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes3[i],
target[i]
)
(df1, indexes1), (df2, indexes2), (df3, indexes3) = df['a'].vbt.rolling_split(
window_len=4, set_lens=(1, 1), left_to_right=True)
pd.testing.assert_frame_equal(
df1,
pd.DataFrame(
np.array([
[1.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes1[i],
target[i]
)
pd.testing.assert_frame_equal(
df2,
pd.DataFrame(
np.array([
[2.0, 3.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes2[i],
target[i]
)
pd.testing.assert_frame_equal(
df3,
pd.DataFrame(
np.array([
[3.0, 4.0],
[4.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes3[i],
target[i]
)
(df1, indexes1), (df2, indexes2), (df3, indexes3) = df['a'].vbt.rolling_split(
window_len=4, set_lens=(0.25, 0.25), left_to_right=[False, True])
pd.testing.assert_frame_equal(
df1,
pd.DataFrame(
np.array([
[1.0, 2.0],
[2.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes1[i],
target[i]
)
pd.testing.assert_frame_equal(
df2,
pd.DataFrame(
np.array([
[3.0, 3.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-03'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes2[i],
target[i]
)
pd.testing.assert_frame_equal(
df3,
pd.DataFrame(
np.array([
[4.0, 4.0],
[np.nan, np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes3[i],
target[i]
)
df1, indexes1 = df['a'].vbt.rolling_split(window_len=2, n=2)
pd.testing.assert_frame_equal(
df1,
pd.DataFrame(
np.array([
[1.0, 4.0],
[2.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
| pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None) | pandas.DatetimeIndex |
from lifelines.datasets import load_waltons
from lifelines import KaplanMeierFitter
from lifelines.utils import median_survival_times
from lifelines.statistics import logrank_test,multivariate_logrank_test
from lifelines import CoxPHFitter
#from lifelines.plotting import add_at_risk_counts
from my_plotting import add_at_risk_counts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from matplotlib.backends.backend_pdf import PdfPages
#from sksurv.linear_model import CoxPHSurvivalAnalysis as cpa
#sys.exit()
#%%
cluster_result = pd.read_csv("output/dna_rna_methy_cluster_result.csv")
data = pd.read_csv(r"dataset/Survival_SupplementalTable_S1_20171025_xena_sp",sep="\t")
#cluster_result["sample"] = cluster_result["sample_id"].apply(lambda x:x[:15])
df = pd.merge(cluster_result,data[["sample","PFI.time","PFI"]],how="inner",left_on="sample_id",right_on="sample")
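# PFI = progression-free interval (TCGA Pan-Cancer clinical endpoint); samples without a
# recorded PFI.time cannot contribute to the survival curves and are dropped next.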
df = df.loc[df["PFI.time"].dropna().index]
#df_brca = df[df["true_label"] == "KIRP"]
#%%
#sys.exit()
cluster_col = "hierachical_separate_clstr"
df_p = | pd.DataFrame() | pandas.DataFrame |
import logging
import yaml
import os
import docker
import re
import sys
from tempfile import NamedTemporaryFile
import numpy as np
import pandas as pd
from pandas.errors import EmptyDataError
from docker.errors import NotFound, APIError
from io import StringIO
# from pynomer.client import NomerClient
# from ..core import URIMapper, URIManager, TaxId
from ..util.taxo_helper import *
pd.options.mode.chained_assignment = None
"""
https://github.com/globalbioticinteractions/globalbioticinteractions/wiki/Taxonomy-Matching
"""
class NoValidColumnException(Exception):
pass
class ConfigurationError(Exception):
pass
def create_mapping(df):
"""
Return a dict that keeps track of duplicated items in a DataFrame
"""
return (
df.reset_index()
.groupby(df.columns.tolist(), dropna=False)["index"]
.agg(["first", tuple])
.set_index("first")["tuple"]
.to_dict()
)
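# Illustrative example: for a one-column frame with values ['a', 'b', 'a'] and index 0..2,
# create_mapping returns {0: (0, 2), 1: (1,)} - each first occurrence maps to the row
# indices of all of its duplicates.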
class TaxonomicEntityValidator:
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config
self.taxo_to_matcher = {
"GBIF": "gbif",
"NCBI": "ncbi",
"IF": "indexfungorum",
"SILVA": "ncbi",
}
self.default_name_matcher = "globalnames"
self.nomer = NomerHelper()
def validate(self, df):
"""For a subset of columns (e.g. consumers and resources),
validate taxonomic ids and/or names against a source taxonomy.
Returns the input DataFrame with new columns containing the valid
ids and names for each query column.
"""
for column_config in self.config.columns:
# Set default values
            assert column_config.uri_column is not None
column_config.id_column = (
column_config.id_column if "id_column" in column_config else None
)
column_config.name_column = (
column_config.name_column if "name_column" in column_config else None
)
column_config.source_taxonomy = (
column_config.source_taxonomy
if "source_taxonomy" in column_config
else None
)
if not (column_config.id_column or column_config.name_column):
raise NoValidColumnException(
"You should specify at least one valid column containing the taxon names or ids."
)
# Map taxa to target taxonomy
self.logger.info(
f"Validate {df.shape[0]} taxa from columns ({column_config.id_column},{column_config.name_column})"
)
valid_df = self.validate_columns(
df,
id_column=column_config.id_column,
name_column=column_config.name_column,
source_taxonomy=column_config.source_taxonomy,
)
df[column_config.uri_column] = valid_df["iri"]
df[column_config.valid_name_column] = valid_df["valid_name"]
df[column_config.valid_id_column] = valid_df["valid_id"]
return df
def validate_columns(
self, df, id_column=None, name_column=None, source_taxonomy=None
):
"""
Taxonomic entity validation consists in checking that the pair (taxid, name)
is valid in a given taxonomy (both taxid and name are optional, but at least
one of them must exist). This function adds a column "valid_id" and a column
"valid_name" to the input DataFrame. If both values are NaN, the corresponding
entity is considered invalid.
"""
def add_prefix(col, src_taxo):
"""
Add the source taxonomy name as a prefix to all taxids in a column
"""
def return_prefixed(id, src_taxo):
if (
pd.notnull(id) and len(str(id).split(":")) == 2
): # .startswith(src_taxo + ":"):
return (
id
if not pd.isna(
pd.to_numeric(str(id).split(":")[-1], errors="coerce")
)
else np.nan
)
elif pd.notnull(id) and pd.isna(pd.to_numeric(id, errors="coerce")):
return np.nan
elif pd.notnull(id):
return f"{src_taxo}:{id}"
else:
return None
return col.map(lambda id: return_prefixed(id, src_taxo))
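        # Illustrative behavior of add_prefix (derived from return_prefixed above):
        #   "6271"      with src_taxo "NCBI" -> "NCBI:6271"
        #   "NCBI:6271" is kept as is; "NCBI:abc" or a bare non-numeric id -> NaN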
assert id_column or name_column
subset = [col for col in [id_column, name_column] if col]
sub_df = df[subset].astype(pd.StringDtype(), errors="ignore")
mapping = create_mapping(
sub_df
) # Mapping from items in drop_df to all duplicates in sub_df
drop_df = sub_df.drop_duplicates(subset=subset).replace("", np.nan)
id_df = None
name_df = None
if id_column:
assert source_taxonomy
if source_taxonomy in self.taxo_to_matcher:
drop_df[id_column] = add_prefix(drop_df[id_column], source_taxonomy)
id_df = drop_df.dropna(subset=[id_column])
if name_column:
drop_df["canonical_name"] = drop_df[name_column]
names = drop_df["canonical_name"].dropna().to_list()
norm_names = self.normalize_names(names)
drop_df.replace({"canonical_name": norm_names}, inplace=True)
if id_df is not None:
name_df = drop_df.loc[~drop_df.index.isin(id_df.index)]
else:
name_df = drop_df.dropna(subset=["canonical_name"])
sub_df["valid_id"] = None
sub_df["valid_name"] = None
sub_df["iri"] = None
if id_df is not None and not id_df.empty:
valid_ids = self.validate_taxids(
id_df, id_column, name_column, source_taxonomy
)
valid_ids = valid_ids.groupby(
["queryId"], dropna=False
) # Get all matches for each id
for index, row in drop_df.iterrows():
id = row[id_column]
if pd.notnull(id) and id in valid_ids.groups:
valid = valid_ids.get_group(id).iloc[0]
for i in mapping[index]:
sub_df.at[i, "valid_id"] = valid["matchId"]
sub_df.at[i, "valid_name"] = valid["matchName"]
sub_df.at[i, "iri"] = valid["iri"]
if name_df is not None and not name_df.empty:
valid_names = self.validate_names(name_df, "canonical_name")
valid_names = valid_names.groupby(
["queryName"], dropna=False
) # Get all matches for each name
for index, row in drop_df.iterrows():
name = row["canonical_name"] # name_column]
if pd.notnull(name) and name in valid_names.groups:
valid = valid_names.get_group(name).iloc[0]
for i in mapping[index]:
sub_df.at[i, "valid_id"] = valid["matchId"]
sub_df.at[i, "valid_name"] = valid["matchName"]
sub_df.at[i, "iri"] = valid["iri"]
if source_taxonomy == "SILVA":
self.logger.debug("SILVA : all names and ids are valid by default")
for index, row in drop_df.iterrows():
for i in mapping[index]:
if id_column:
sub_df.at[i, "valid_id"] = (
row[id_column]
if row[id_column].startswith("SILVA:")
else sub_df.at[i, "valid_id"]
)
taxid = row[id_column].split(":")[-1]
sub_df.at[i, "iri"] = (
f"https://www.arb-silva.de/{taxid}"
if row[id_column].startswith("SILVA:")
else sub_df.at[i, "iri"]
)
if name_column:
sub_df.at[i, "valid_name"] = (
row[name_column]
if sub_df.at[i, "valid_id"].startswith("SILVA:")
else sub_df.at[i, "valid_name"]
)
self.logger.debug(sub_df[["valid_id", "valid_name", "iri"]])
# Get some statistics
df_drop = sub_df.drop_duplicates(subset=subset)
nb_unique = df_drop.shape[0]
nb_valid = df_drop.dropna(subset=["valid_id"]).shape[0]
self.logger.info(f"Found {nb_valid}/{nb_unique} valid taxonomic entities")
return sub_df
def normalize_names(self, names):
"""
Given a list of taxonomic names, return the corresponding canonical forms
"""
f_temp = NamedTemporaryFile(delete=True) # False)
self.logger.debug(f"Write names to {f_temp.name} for validation using gnparser")
names_str = "\n".join([name.replace("\n", " ") for name in names])
f_temp.write(names_str.encode())
        f_temp.read()  # Not sure why, but this read is needed; otherwise the file sometimes appears empty when read
canonical_names = get_canonical_names(f_temp.name)
f_temp.close()
canonical_names = canonical_names["CanonicalSimple"].to_list()
assert len(names) == len(canonical_names)
return {names[i]: canonical_names[i] for i in range(len(names))}
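    # Illustrative output (assuming gnparser canonical forms):
    #   normalize_names(["Canis lupus Linnaeus, 1758"])
    #   -> {"Canis lupus Linnaeus, 1758": "Canis lupus"}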
def validate_names(self, df, name_column):
"""
Validate all taxonomic names in a DataFrame column
"""
names = df[name_column].to_list()
self.logger.debug(f"Validate names {names} using {self.default_name_matcher}")
query = self.nomer.df_to_query(
df=df,
name_column=name_column,
)
matching = self.nomer.ask_nomer(query, matcher=self.default_name_matcher)
if not matching.empty:
mask = matching["matchType"].isin(
["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"] # , "SIMILAR_TO"]
)
return matching[mask]
return matching
def validate_taxids(self, df, id_column, name_column=None, source_taxonomy=None):
"""
Validate all taxonomic identifiers in a DataFrame column against a given taxonomy
"""
matcher = self.taxo_to_matcher[source_taxonomy]
taxids = df[id_column].to_list()
self.logger.debug(f"Validate taxids {taxids} using {matcher}")
query = self.nomer.df_to_query(
df=df,
id_column=id_column,
name_column=name_column,
)
matching = self.nomer.ask_nomer(query, matcher=matcher)
if not matching.empty:
mask = matching["matchType"].isin(
["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"] # , "SIMILAR_TO"]
)
return matching[mask]
return matching
def test_validator():
df = pd.read_csv(
"/home/leguilln/workspace/KNOWLEDGE_INTEGRATION/inteGraph/taxo_valid_test.csv",
sep=";",
keep_default_na=False,
)
validator = TaxonomicEntityValidator()
df = validator.validate(
df, id_column="consumer_key", name_column="consumer_scientificName"
)
df.to_csv(
"/home/leguilln/workspace/KNOWLEDGE_INTEGRATION/inteGraph/taxo_valid_result.csv",
sep=";",
)
class TaxonomicEntityMapper:
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config
self.default_matcher = "wikidata-web"
self.ncbi_matcher = "ncbi"
self.target_taxonomy = "NCBI"
self.keep_taxo = ["NCBI", "GBIF", "IF"]
self.nomer = NomerHelper()
def df_to_triples(self, df):
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDFS, OWL
g = Graph()
for index, row in df.iterrows():
if row["queryIRI"] != row["iri"]:
g.add((URIRef(row["queryIRI"]), OWL.sameAs, URIRef(row["iri"])))
g.add((URIRef(row["queryIRI"]), RDFS.label, Literal(row["queryName"])))
if row["matchId"].split(":")[0].startswith("NCBI"):
taxid = row["matchId"].split(":")[-1]
g.add(
(
URIRef(row["queryIRI"]),
OWL.sameAs,
URIRef(f"http://purl.obolibrary.org/obo/NCBITaxon_{taxid}"),
)
)
g.add((URIRef(row["iri"]), RDFS.label, Literal(row["matchName"])))
if pd.notna(row["matchRank"]):
g.add(
(
URIRef(row["iri"]),
URIRef("http://purl.obolibrary.org/obo/ncbitaxon#has_rank"),
Literal(row["matchRank"]),
)
)
return g
def map(self, df):
"""For a subset of columns (e.g. consumers and resources),
try to map taxon ids and/or names to IRIs in a target taxonomy
using a TaxonomicEntityMapper.
Returns the input DataFrame with new columns containing the IRIs for each
query column.
"""
taxa_dfs = []
for column_config in self.config.columns:
assert column_config.uri_column != None
# Set default values
column_config.id_column = (
column_config.id_column if "id_column" in column_config else None
)
column_config.name_column = (
column_config.name_column if "name_column" in column_config else None
)
if not (column_config.id_column or column_config.name_column):
raise NoValidColumnException(
"You should specify at least one valid column containing the taxon names or ids."
)
# Map taxa to target taxonomy
self.logger.info(
f"Map {df.shape[0]} taxons from columns ({column_config.id_column},{column_config.name_column}) to target taxo {self.target_taxonomy}"
)
id_to_iri = (
df[[column_config.id_column, column_config.uri_column]]
.astype(pd.StringDtype(), errors="ignore")
.drop_duplicates()
)
id_to_iri = id_to_iri.set_index(column_config.id_column, drop=True)[
column_config.uri_column
].to_dict()
taxa_df = self.map_columns(
df,
id_column=column_config.id_column,
name_column=column_config.name_column,
)
taxa_df["queryIRI"] = taxa_df["queryId"].map(id_to_iri)
taxa_dfs.append(taxa_df)
return pd.concat(taxa_dfs, ignore_index=True)
def map_columns(self, df, id_column=None, name_column=None):
assert id_column or name_column
subset = [col for col in [id_column, name_column] if col]
sub_df = df[subset].astype(pd.StringDtype(), errors="ignore")
mapping = create_mapping(
sub_df
) # Mapping from items in drop_df to all duplicates in sub_df
drop_df = sub_df.drop_duplicates(subset=subset).replace("", np.nan)
matches = pd.DataFrame()
if id_column:
self.logger.debug(
f"Map {drop_df.shape[0]} unique taxa to target taxonomy {self.target_taxonomy} using {self.default_matcher}"
)
matches = self.map_ids_and_names_to_target_taxo(
drop_df, id_column, name_column, matcher=self.default_matcher
)
# Get all matches for each id
if not matches.empty:
matches = matches[
matches["matchId"].str.startswith(tuple(self.keep_taxo))
]
if matches.empty:
not_found_df = drop_df
else:
not_found_in_target_taxo = []
for name, group in matches.groupby(
["queryId", "queryName"], dropna=False
):
if group[
group["matchId"].str.startswith(self.target_taxonomy)
].empty:
self.logger.debug(
f"Entity {name} not found in target taxonomy {self.target_taxonomy}"
)
not_found_in_target_taxo.append(name)
not_found_df = pd.DataFrame.from_records(
not_found_in_target_taxo, columns=[id_column, name_column]
)
not_found_df = pd.concat( # Required if we want to use SILVA taxonomy
[
not_found_df,
drop_df[~drop_df[id_column].isin(matches["queryId"])],
]
)
else:
not_found_df = drop_df
if not not_found_df.empty:
self.logger.debug(
f"Map {not_found_df.shape[0]} remaining taxa to target taxonomy {self.target_taxonomy} using {self.ncbi_matcher}"
)
additional_matches = self.map_ids_and_names_to_target_taxo(
not_found_df, id_column, name_column, matcher=self.ncbi_matcher
)
# Remove queries with multiple matching names in the same taxo
keep = []
for name, group in additional_matches.groupby(["queryId"], dropna=False):
matches_in_taxo = group["matchId"].unique()
taxos = [x.split(":")[0] for x in matches_in_taxo]
keep_match = True
for taxo in taxos:
if taxos.count(taxo) > 1:
self.logger.debug(
f"Multiple matches for {name} in taxonomy {taxo} : {matches_in_taxo} -> discard !"
)
keep_match = False
break
if keep_match:
keep.append(name)
additional_matches = additional_matches[
additional_matches["queryId"].isin(keep)
]
if not matches.empty:
matches = pd.concat([matches, additional_matches], ignore_index=True)
else:
matches = additional_matches
return matches
def map_ids_and_names_to_target_taxo(self, df, id_column, name_column, matcher):
temp_df = df.copy()
temp_df[id_column] = temp_df[id_column].fillna("")
mask = temp_df[id_column].str.startswith("SILVA:")
print(mask.unique())
print(temp_df[id_column].fillna("").astype(str))
temp_df[id_column] = temp_df[id_column].astype(str).mask(mask.astype("bool"))
query = self.nomer.df_to_query(
df=temp_df,
id_column=id_column,
name_column=name_column,
)
matching = self.nomer.ask_nomer(query, matcher=matcher)
if not matching.empty:
mask = matching["matchType"].isin(
["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"]
)
matching = matching[mask]
# Required if we want to use SILVA taxonomy : if the queryId column is empty,
# use the valid_id in df instead
matching["queryId"] = matching.apply(
lambda x: df[df[name_column] == x["queryName"]][id_column].iloc[0]
if x["queryId"] == ""
else x["queryId"],
axis=1,
)
return matching # [mask]
def test_mapper():
df = pd.read_csv(
"/home/leguilln/workspace/KNOWLEDGE_INTEGRATION/inteGraph/taxo_valid_result.csv",
sep=";",
keep_default_na=False,
)
mapper = TaxonomicEntityMapper(None, None)
mapper.map(df, id_column="valid_id", name_column="valid_name")
# def source_id_to_target_id(self, df, id_column, name_column=None, matcher=None):
# """
# Ask nomer about a batch of taxon identifiers using a given matcher.
# """
#
# df_copy = df.drop_duplicates(subset=id_column)
#
# # Keep a reference to the index of the row containing the identifier
# query_to_ref_index_map = {
# row[id_column]: index for index, row in df_copy.iterrows()
# }
#
# query = self.nomer.df_to_query(
# df=df_copy,
# id_column=id_column,
# name_column=name_column,
# )
# res_df = self.nomer.ask_nomer(query, matcher=matcher)
#
# # Keep non empty matches with types SAME_AS or SYNONYM_OF in the target taxonomy
# mapped_df = res_df.dropna(axis=0, subset=["matchId"])
# mapped_df = mapped_df[
# mapped_df["matchType"].isin(["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"])
# ]
#
# if not mapped_df.empty:
# mapped_df = mapped_df[
# mapped_df["matchId"].str.startswith(self.target_taxonomy)
# ]
#
# # Check if a single taxon matches with several (distinct) ids in the target taxonomy
# # Sometimes, the same taxon is matched several times to the same target id,
# # so we start by dropping these duplicates, then looks for remaining duplicates.
# mapped_df_dropped = mapped_df.drop_duplicates(
# subset=["queryId", "matchId"], keep=False
# )
# duplicated = mapped_df_dropped.duplicated(subset=["queryId"], keep=False)
# # print(mapped_df)
# # print(duplicated)
# if duplicated.any():
# self.logger.error(
# f"Found several target IDs for a single taxa in df: {mapped_df_dropped[duplicated]}"
# )
# # mapped_df[duplicated].to_csv(f"duplicated_{id_column}.csv")
# mapped_df = mapped_df_dropped[~duplicated]
# # raise Exception("Unable to handle multiple candidates at id level.")
#
# # Reset index using the (id, index) map
# mapped_index = mapped_df["queryId"].map(query_to_ref_index_map)
# mapped_df.set_index(
# pd.Index(mapped_index.tolist()).rename("externalId"),
# inplace=True,
# )
#
# # others_df contains all the taxa that have no match in the target taxonomy
# others_df = df_copy[~df_copy[id_column].isin(mapped_df["queryId"])]
#
# else:
# others_df = df
#
# return mapped_df, others_df
class TaxonomicEntityMapperOld:
def __init__(self, prefix, config):
self.logger = logging.getLogger(__name__)
# self.uri_mapper = URIMapper()
self.config = config
self.prefix = prefix
self.default_taxonomy = "NCBI"
# Validate source taxonomy
self.source_taxonomy = (
self.config.source_taxonomy if "source_taxonomy" in self.config else None
)
if not (self.source_taxonomy and self.source_taxonomy in self.prefix):
self.logger.error(
"Invalid source taxonomy {} : use default taxonomy {}".format(
self.source_taxonomy, self.default_taxonomy
)
)
self.source_taxonomy = self.default_taxonomy
# Validate target taxonomy
self.target_taxonomy = (
self.config.target_taxonomy if "target_taxonomy" in self.config else None
)
if not (self.target_taxonomy and self.target_taxonomy in self.prefix):
self.logger.error(
"Invalid target taxonomy {} : use default taxonomy {}".format(
self.target_taxonomy, self.default_taxonomy
)
)
self.target_taxonomy = self.default_taxonomy
self.nomer = NomerHelper()
self.taxo_to_matcher = {"GBIF": "gbif", "NCBI": "ncbi", "IF": "indexfungorum"}
self.gnparser = GNParserHelper()
# def format_id_column(self, df, id_column, taxonomy):
# """
# Format taxon identifier as DB:XXXX (e.g. NCBI:6271, GBIF:234581...).
# """
#
# return df.apply(
# lambda row: f"{self.source_taxonomy}:" + row[id_column]
# if (
# pd.notnull(row[id_column])
# and not row[id_column].startswith(self.source_taxonomy + ":")
# )
# else row[id_column],
# axis=1,
# )
#
# def validate(self, df, id_column=None, name_column=None, matcher=None):
# """
# Match taxon from the source taxonomy to the source taxonomy
# (e.g.GBIF to GBIF) to filter out taxa with invalid ids.
# """
#
# # Drop duplicates for efficiency
# # subset = [
# # col for col in [id_column, name_column] if col
# # ]
# subset = id_column if id_column else name_column
# df_copy = df.copy().drop_duplicates(subset=subset)
#
# # For NCBI and GBIF, the id must be an integer
#
# if id_column:
# # valid_df = df_copy
# valid_df = df_copy[df_copy[id_column].str.isnumeric()]
#
# if self.source_taxonomy: # Format taxon id
# valid_df[id_column] = valid_df[id_column].map(
# lambda id: f"{self.source_taxonomy}:{id}"
# if (
# pd.notnull(id)
# and not str(id).startswith(self.source_taxonomy + ":")
# )
# else id
# )
#
# if not valid_df.empty:
#
# query = self.nomer.df_to_query(
# df=valid_df,
# id_column=id_column,
# name_column=name_column,
# )
# res_df = self.nomer.ask_nomer(query, matcher=matcher)
# res_df.set_index(
# pd.Index(res_df["queryId"]).rename("externalId"),
# drop=False,
# inplace=True,
# )
# mask = res_df["matchType"].isin(
# ["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"]
# )
# valid_df = res_df[mask]
# # invalid_df = res_df[~mask]
# #
# # if not invalid_df.empty and name_column:
# # invalid_df = invalid_df[id_column == None]
# #
# # query = self.nomer.df_to_query(
# # df=invalid_df,
# # name_column=name_column,
# # )
#
# else:
# raise EmptyDataError("Empty query")
#
# else: # Validate names (all names are considered valid)
# f_temp = NamedTemporaryFile(delete=True) # False)
# self.logger.debug(
# f"Write names to {f_temp.name} for validation using gnparser"
# )
# names = [name.replace("\n", " ") for name in df_copy[name_column].to_list()]
# names_str = "\n".join(names)
# f_temp.write(names_str.encode())
# f_temp.read() # I don't know why but it is needed or sometimes the file appears empty when reading
# success, canonical_names = self.gnparser.get_canonical_name(f_temp.name)
# f_temp.close()
#
# valid_df = pd.DataFrame(
# columns=self.nomer.columns,
# )
# valid_df["queryName"] = df_copy[name_column].to_list()
# valid_df["matchType"] = "SAME_AS"
# valid_df["matchName"] = valid_df["queryName"]
# if success:
# valid_df["matchName"] = canonical_names["CanonicalSimple"]
# # valid_df.to_csv("valid_name.csv")
# valid_df = valid_df.set_index("queryName", drop=False)
# valid_df = valid_df.set_index(valid_df.index.rename("externalId"))
# valid_df = valid_df.drop_duplicates(subset="matchName")
#
# return valid_df # , invalid_df
# def source_id_to_target_id(self, df, id_column, name_column=None, matcher=None):
# """
# Ask nomer about a batch of taxon identifiers using a given matcher.
# """
#
# df_copy = df.drop_duplicates(subset=id_column)
#
# # Keep a reference to the index of the row containing the identifier
# query_to_ref_index_map = {
# row[id_column]: index for index, row in df_copy.iterrows()
# }
#
# query = self.nomer.df_to_query(
# df=df_copy,
# id_column=id_column,
# name_column=name_column,
# )
# res_df = self.nomer.ask_nomer(query, matcher=matcher)
#
# # Keep non empty matches with types SAME_AS or SYNONYM_OF in the target taxonomy
# mapped_df = res_df.dropna(axis=0, subset=["matchId"])
# mapped_df = mapped_df[
# mapped_df["matchType"].isin(["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"])
# ]
#
# if not mapped_df.empty:
# mapped_df = mapped_df[
# mapped_df["matchId"].str.startswith(self.target_taxonomy)
# ]
#
# # Check if a single taxon matches with several (distinct) ids in the target taxonomy
# # Sometimes, the same taxon is matched several times to the same target id,
# # so we start by dropping these duplicates, then looks for remaining duplicates.
# mapped_df_dropped = mapped_df.drop_duplicates(
# subset=["queryId", "matchId"], keep=False
# )
# duplicated = mapped_df_dropped.duplicated(subset=["queryId"], keep=False)
# # print(mapped_df)
# # print(duplicated)
# if duplicated.any():
# self.logger.error(
# f"Found several target IDs for a single taxa in df: {mapped_df_dropped[duplicated]}"
# )
# # mapped_df[duplicated].to_csv(f"duplicated_{id_column}.csv")
# mapped_df = mapped_df_dropped[~duplicated]
# # raise Exception("Unable to handle multiple candidates at id level.")
#
# # Reset index using the (id, index) map
# mapped_index = mapped_df["queryId"].map(query_to_ref_index_map)
# mapped_df.set_index(
# pd.Index(mapped_index.tolist()).rename("externalId"),
# inplace=True,
# )
#
# # others_df contains all the taxa that have no match in the target taxonomy
# others_df = df_copy[~df_copy[id_column].isin(mapped_df["queryId"])]
#
# else:
# others_df = df
#
# return mapped_df, others_df
#
# def source_name_to_target_id(
# self, df, id_column=None, name_column=None, matcher="ncbi-taxon"
# ):
# """Ask nomer about a batch of taxon names using a given matcher.
# When there are several taxa matching the same name, we try to find
# the best match using a maximum lineage similarity approach (this
# requires a valid id_column).
# """
#
# # df_copy = df.drop_duplicates(subset=[id_column, name_column])
# ref_col = id_column if id_column else name_column
#
# duplicated = df.duplicated(subset=ref_col, keep=False)
# # df.loc[duplicated].to_csv(f"duplicated_base_{ref_col}.csv")
#
# # Keep a reference to the index of the row containing the identifier
# query_to_ref_index_map = {row[ref_col]: index for index, row in df.iterrows()}
#
# query = self.nomer.df_to_query(
# df=df,
# name_column=name_column,
# id_column=id_column,
# )
#
# res_df = self.nomer.ask_nomer(query, matcher=matcher)
#
# # Keep non empty matches with types SAME_AS or SYNONYM_OF in the target taxonomy
# mapped_df = res_df.dropna(axis=0, subset=["matchId"])
# mapped_df = mapped_df[
# mapped_df["matchType"].isin(["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"])
# ]
#
# if not mapped_df.empty:
# mapped_df = mapped_df[
# mapped_df["matchId"].str.startswith(self.target_taxonomy)
# ]
#
# subset = "queryId" if id_column else "queryName"
#
# duplicated = mapped_df.duplicated(subset=[subset], keep=False)
#
# if duplicated.any():
# self.logger.info(
# f"Found several target entities for a single taxon in df :\n{mapped_df[duplicated]}"
# )
# # mapped_df.loc[duplicated].to_csv(f"duplicated_{name_column}.csv")
#
# if id_column:
# keep_index = self.resolve_duplicates(
# df, mapped_df[duplicated], id_column, query_to_ref_index_map
# )
# for index in keep_index:
# duplicated.loc[index] = False
# else:
# # TODO : refactor
# # If we do not have access to the ifentifier for disambiguation,
# # we try to compare the lineages. Very often, we have the same
# # name designating the same entity, but at different ranks (e.g. genus and subgenus)
# # In this case, we keep the highest rank (e.g. genus)
# keep_index = []
# duplicates = mapped_df[duplicated]
# unique_names = pd.unique(duplicates["matchName"])
# for name in unique_names:
# df_name = mapped_df[mapped_df["matchName"] == name]
# resolved = False
# if df_name.shape[0] == 2:
# lin = []
# for index, row in df_name.iterrows():
# lin.append((index, row["linNames"].split(" | ")))
#
# if set(lin[0][1]) == set(lin[1][1]):
# resolved = True
# # We keep the highest rank
# if len(lin[0][1]) < len(lin[1][1]):
# duplicated.loc[lin[0][0]] = False
# else:
# duplicated.loc[lin[1][0]] = False
# if not resolved:
# self.logger.debug(
# f"Cannot resolve duplicates : discard taxon {name}."
# )
#
# mapped_df = mapped_df[~duplicated]
# mapped_index = mapped_df[subset].map(query_to_ref_index_map)
#
# mapped_df.set_index(
# pd.Index(mapped_index.tolist()).rename("externalId"),
# inplace=True,
# )
# others_df = df[~df[ref_col].isin(mapped_df[subset])]
#
# else:
# others_df = df
#
# return mapped_df, others_df
#
# def resolve_duplicates(self, ref_df, duplicates, id_column, query_to_ref_index_map):
#
# unique_duplicates = duplicates["queryId"].unique()
# keep_index = []
# self.logger.debug(f"Unique duplicates {unique_duplicates}")
# for id in unique_duplicates:
# candidates = duplicates[duplicates["queryId"] == id]
# if id not in query_to_ref_index_map:
# raise KeyError(f"Candidate id {id} has no match")
# ref_index = query_to_ref_index_map[id]
# reference = ref_df.loc[ref_index, :] # [ref_df[ref_df.columns[0]] == id]
# self.logger.info(f"Get best match for taxon\n{reference}")
# best_index = self.get_best_match(
# reference,
# candidates,
# )
# keep_index.append(best_index)
# return keep_index
#
# def get_best_match(self, reference, candidates):
# """
# Given nomer's response for a given taxon, return the best match.
# If only one match, returns the candidate IRI
# If more than one match, get the full lineage of the query taxon from its taxid
# For each candidate
# Compute the similarity between the query taxon lineage and the candidate lineage
# Similarity = the length of the intersection of the two lineages
# Return best match = the IRI of the candidate with the maximum similarity
# """
# ref_lineage = [x.strip(" ") for x in reference["linNames"].split("|") if x]
#
# candidate_lineages = [
# {
# "index": index,
# "lineage": [x.strip(" ") for x in row["linNames"].split("|") if x],
# }
# for index, row in candidates.iterrows()
# ]
#
# if len(set([",".join(tgt["lineage"]) for tgt in candidate_lineages])) == 1:
# best_index = candidate_lineages[0]["index"]
# best_match = candidates.loc[best_index]
# best_match_id = best_match["matchId"]
# self.logger.debug(
# f"Found multiple matches with similar lineages, return the first one : {best_match_id}"
# )
# else:
# similarities = [
# 1.0
# * len(set(ref_lineage).intersection(tgt["lineage"]))
# / len(set(ref_lineage))
# for tgt in candidate_lineages
# ]
#
# # print(tgt_lineages, similarities)
# max_sim = max(similarities)
# max_indexes = [i for i, sim in enumerate(similarities) if sim == max_sim]
# if len(max_indexes) > 1:
# self.logger.debug(
# f"Found multiple best matches with similarity {max_sim:.2f}: cannot find best match"
# )
# return None
#
# best_index = candidate_lineages[max_indexes[0]]["index"]
# best_match = candidates.loc[best_index]
# best_match_id = best_match["matchId"]
# self.logger.debug(
# f"Found best match with similarity {max_sim:.2f} : {best_match_id}"
# )
# return best_index
#
# def map(self, df, id_column, name_column):
# """
# Using nomer taxonomic mapping capabilities, try to get IRIs in the
# target taxonomy from taxon names and/or identifiers in the source
# taxonomy.
#
# First, validate taxon names/ids by querying the source taxonomy
# Then, try to map taxon ids to the target taxonomy using wikidata-web
# For each taxon without a match
# Try to map the taxon name using globi
# For each taxon without a match
# Try to map the taxon name using ncbi
# """
#
# # Drop duplicates for efficiency
# subset = [x for x in [id_column, name_column] if x]
# w_df = df.dropna(subset=subset).drop_duplicates(subset=subset)
# # w_df = df.dropna(subset=subset, how="all").drop_duplicates(subset=subset)
#
# self.logger.debug(f"Found {w_df.shape[0]} unique taxa")
#
# # Step 1 : validate taxa against the source taxonomy
# self.logger.debug(f"Validate taxa using info from columns {subset}")
# valid_df = self.validate(
# w_df,
# id_column,
# name_column,
# matcher=self.taxo_to_matcher[self.source_taxonomy],
# )
# self.logger.debug(f"Found {valid_df.shape[0]}/{w_df.shape[0]} valid taxa")
#
# # src_to_src_mappings is a data frame containing mappings between ids
# # in the same source taxonomy. Indeed, a taxon can have several ids
# # in a single taxonomy, and the id used in the data may not be the preferred
# # id for this taxon.
# src_to_src_mappings = valid_df[
# (pd.notnull(valid_df["matchId"]))
# & (valid_df["queryId"] != valid_df["matchId"])
# ]
#
# mapped = []
#
# # Step 2 : map valid taxa using their id in the source taxonomy (if available)
# # if id_column:
# # self.logger.debug(
# # f"Map {valid_df.shape[0]} unique taxa to target taxonomy {self.target_taxonomy} using wikidata-web"
# # )
# # mapped_df, others_df = self.source_id_to_target_id(
# # valid_df,
# # id_column="matchId",
# # # name_column="matchName",
# # matcher="wikidata-web",
# # )
# # self.logger.debug(
# # f"Found {mapped_df.shape[0]}/{valid_df.shape[0]} taxa in target taxonomy {self.target_taxonomy}"
# # )
# # mapped.append(mapped_df)
# # else:
# # others_df = valid_df
# if id_column:
# self.logger.debug(
# f"Map {valid_df.shape[0]} unique taxa to target taxonomy {self.target_taxonomy} using globi"
# )
# mapped_df, others_df = self.source_id_to_target_id(
# valid_df,
# id_column="matchId",
# # name_column="matchName",
# matcher="globi",
# )
# self.logger.debug(
# f"Found {mapped_df.shape[0]}/{valid_df.shape[0]} taxa in target taxonomy {self.target_taxonomy} using globi"
# )
# mapped.append(mapped_df)
# else:
# others_df = valid_df
#
# if not others_df.empty and id_column:
# nb_taxa = others_df.shape[0]
# self.logger.debug(
# f"Map {nb_taxa} remaining taxa to target taxonomy {self.target_taxonomy} using wikidata-web"
# )
# mapped_df, others_df = self.source_id_to_target_id(
# others_df,
# id_column="matchId",
# # name_column="matchName",
# matcher="wikidata-web",
# )
# self.logger.debug(
# f"Found {mapped_df.shape[0]}/{nb_taxa} taxa in target taxonomy {self.target_taxonomy} using wikidata-web"
# )
# mapped.append(mapped_df)
# else:
# others_df = valid_df
#
# # Step 3 : map the remaining taxa using their name (if available)
# if not others_df.empty and name_column:
# nb_taxa = others_df.shape[0]
# self.logger.debug(
# f"Map {nb_taxa} remaining taxa to target taxonomy {self.target_taxonomy} using ncbi"
# )
# # others_df.to_csv("others.csv")
# mapped_df, others_df = self.source_name_to_target_id(
# others_df,
# id_column="matchId" if id_column else None,
# name_column="matchName",
# matcher="ncbi",
# )
# # mapped_df.to_csv("mapped.csv")
# self.logger.debug(
# f"Found {mapped_df.shape[0]}/{nb_taxa} taxa in target taxonomy {self.target_taxonomy} using ncbi"
# )
# mapped.append(mapped_df)
#
# if not others_df.empty and name_column:
# nb_taxa = others_df.shape[0]
# self.logger.debug(
# f"Map {nb_taxa} remaining taxa to target taxonomy {self.target_taxonomy} using globi"
# )
# mapped_df, others_df = self.source_name_to_target_id(
# others_df,
# id_column="matchId" if id_column else None,
# name_column="matchName",
# matcher="globi",
# )
# self.logger.debug(
# f"Found {mapped_df.shape[0]}/{nb_taxa} taxa in target taxonomy {self.target_taxonomy} using globi"
# )
# mapped.append(mapped_df)
#
# mapped_df = pd.concat(mapped, ignore_index=False)
# self.logger.info(
# f"Found {mapped_df.shape[0]}/{valid_df.shape[0]} valid taxa in target taxonomy {self.target_taxonomy}"
# )
#
# if not id_column:
# mapped_df["queryId"] = mapped_df["matchId"]
# valid_df = mapped_df
#
# # Create a taxon-iri map and create the Series containing IRIs
# ref_col = id_column if id_column else name_column
# subset = "queryId" if id_column else "matchId"
#
# if id_column and self.source_taxonomy:
# df[id_column] = self.format_id_column(df, id_column, self.source_taxonomy)
#
# src_tgt_map = {}
# for external_id in df[ref_col].unique():
# print(
# external_id,
# valid_df[subset][external_id]
# if external_id in valid_df.index
# else None,
# )
# valid_id = (
# valid_df[subset][external_id] if external_id in valid_df.index else None
# )
# src_tgt_map[external_id] = valid_id
# valid_id_series = df[ref_col].map(src_tgt_map)
#
# return valid_id_series, pd.concat(
# [mapped_df, src_to_src_mappings], ignore_index=True
# )
class TaxonomicMapper:
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config
with open(os.path.abspath(self.config.prefix_file), "r") as ymlfile:
self.prefix = yaml.load(ymlfile, Loader=yaml.FullLoader)
def map(self, df):
"""For a subset of columns (e.g. consumers and resources),
try to map taxon ids and/or names to IRIs in a target taxonomy
using a TaxonomicEntityMapper.
Returns the input DataFrame with new columns containing the IRIs for each
query column.
"""
def get_full_iri(taxid):
print(taxid)
            if pd.notnull(taxid):  # api: pandas.notnull
import logging
from abc import ABC, abstractmethod
from typing import Union
import pandas as pd
import skbio.diversity
from skbio.stats.distance import DistanceMatrix
from skbio.stats.ordination import pcoa
from moonstone.analysis.diversity.base import (
DiversityBase, PhylogeneticDiversityBase
)
from moonstone.plot.graphs.scatter import GroupScatterGraph, GroupScatter3DGraph
logger = logging.getLogger(__name__)
class BetaDiversity(DiversityBase, ABC):
DIVERSITY_INDEXES_NAME = "beta_index"
DEF_TITLE = "(beta diversity) distribution across the samples"
@abstractmethod
def compute_beta_diversity(self, df) -> DistanceMatrix:
"""
method that compute the beta diversity
"""
pass
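    # A minimal concrete sketch (illustrative, not part of this module), assuming the
    # count table keeps taxa as rows and samples as columns as elsewhere in this class:
    #
    #   class BrayCurtisBetaDiversity(BetaDiversity):
    #       def compute_beta_diversity(self, df) -> DistanceMatrix:
    #           return skbio.diversity.beta_diversity(
    #               "braycurtis", df.transpose().values, ids=df.columns)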
def compute_diversity(self) -> pd.Series:
series = self.beta_diversity.to_series()
series.name = self.DIVERSITY_INDEXES_NAME
return series
@property
def beta_diversity(self):
"""
DistanceMatrix from skbio.
"""
if getattr(self, '_beta_diversity', None) is None:
self._beta_diversity = self.compute_beta_diversity(self.df)
return self._beta_diversity
@property
def beta_diversity_series(self):
return self.diversity_indexes
@property
def beta_diversity_df(self):
return self.beta_diversity.to_data_frame()
def _get_grouped_df(self, metadata_series):
df_list = []
for group in metadata_series.dropna().unique():
group_df = self.df.loc[:, metadata_series[metadata_series == group].index]
beta_div_multi_indexed_df = self.compute_beta_diversity(group_df).to_series().to_frame()
if beta_div_multi_indexed_df.empty: # Happens if only one item from the group
continue
# Make unique index from multi index
beta_div_not_indexed_df = beta_div_multi_indexed_df.reset_index()
index_col_names = ["level_0", "level_1"]
beta_div_solo_indexed_df = beta_div_not_indexed_df.set_index(
beta_div_not_indexed_df[index_col_names].agg('-'.join, axis=1)
).drop(index_col_names, axis=1)
beta_div_solo_indexed_df.columns = [self.DIVERSITY_INDEXES_NAME]
# Add corresponding group name
beta_div_solo_indexed_df[metadata_series.name] = group
df_list.append(beta_div_solo_indexed_df)
return pd.concat(df_list).dropna()
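    # The grouped frame returned above is indexed by "sampleA-sampleB" pair labels and
    # holds two columns: the beta_index value and the metadata group of each pair.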
@property
def pcoa(self):
if getattr(self, '_pcoa', None) is None:
self._pcoa = pcoa(self.beta_diversity).samples
return self._pcoa
def visualize_pcoa(
self, metadata_df: pd.DataFrame, group_col: str, mode: str = 'scatter',
show: bool = True, output_file: Union[bool, str] = False,
colors: dict = None, groups: list = None,
plotting_options: dict = None,
):
filtered_metadata_df = self._get_filtered_df_from_metadata(metadata_df)
        df = pd.concat([self.pcoa, filtered_metadata_df[group_col]], axis=1)  # api: pandas.concat
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import time
import warnings
warnings.filterwarnings('ignore')
sns.set(style='darkgrid', palette='deep')
#Analysing dataset with padas profiling
#from pandas_profiling import ProfileReport
#profile = ProfileReport(df, title='Medical Cost Personal Datasets', html={'style':{'full_width':True}})
#Importing Dataset
df_raw = pd.read_excel('titanic3.xls')
new_columns = ['class','survival', 'name', 'sex', 'age', 'siblings/spouses',
'parents/children', 'ticket', 'fare', 'cabin', 'embarked', 'lifeboat',
'body number', 'home/destination']
df_raw.info()
#Feature Engineering
df = pd.DataFrame(df_raw.values, columns= new_columns )
df_user = pd.DataFrame(np.arange(0, len(df)), columns=['passanger'])
df = pd.concat([df_user, df], axis=1)
df['family'] = df['siblings/spouses'] + df['parents/children'] + 1
df = df.drop(['siblings/spouses','parents/children'], axis=1)
df['embarked'].value_counts()
df['embarked'].replace(['S', 'C', 'Q'],
                       ['southampton', 'cherbourg', 'queenstown'], inplace=True)
df.info()
df.columns
df[['class', 'survival', 'age', 'fare',
'body number', 'family']] = df[['class', 'survival', 'age', 'fare',
'body number', 'family']].apply(pd.to_numeric)
#Converting columns to Datetime
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
time_new = df['Timestamp'].iloc[0]
df['Hour'] = df['Timestamp'].apply(lambda time_new: time_new.hour)
df['Month'] = df['Timestamp'].apply(lambda time_new: time_new.month)
df['Day'] = df['Timestamp'].apply(lambda time_new: time_new.dayofweek)
df["hour"] = df.hour.str.slice(1, 3).astype(int)
#Visualising Dataset
bins = range(0,100,10)
ax = sns.distplot(df.age[df.y=='yes'],
color='red', kde=False, bins=bins, label='Have Subscribed')
sns.distplot(df.age[df.y=='no'],
ax=ax, # Overplots on first plot
color='blue', kde=False, bins=bins, label="Haven't Subscribed")
plt.legend()
plt.show()
g = pd.crosstab(df.sex, df.survival).plot(kind='bar', figsize=(10,5))
ax = g.axes
for p in ax.patches:
ax.annotate(f"{p.get_height() * 100 / df.shape[0]:.2f}%", (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=11, color='gray', rotation=0, xytext=(0, 10),
textcoords='offset points')
plt.grid(b=True, which='major', linestyle='--')
plt.title('Survival Frequency for Genre')
plt.legend(['Not Survived', 'Survived'])
plt.xlabel('Genre')
plt.ylabel('Quantity')
plt.show()
df.groupby(pd.cut(df.age, bins))['age'].count().plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Frequency of Age')
plt.grid(b=True, which='major', linestyle='--')
plt.xlabel('Age')
plt.ylabel('Quantity')
plt.show()
pd.crosstab(pd.cut(df.age, bins), df.survival).plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Survival Frequency for Age')
plt.legend(['Not Survived', 'Survived'])
plt.yticks(np.arange(0,250,50))
plt.xlabel('Age')
plt.ylabel('Quantity')
plt.show()
age_notsurvival = (df[df.survival==0].groupby(pd.cut(df[df.survival==0].age, bins))['age'].count() / len(df[df.survival==0]))*100
age_survival = (df[df.survival==1].groupby(pd.cut(df[df.survival==1].age, bins))['age'].count() / len(df[df.survival==1]))*100

age_notsurvival.plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Age Distribution (%) of Passengers Who Did Not Survive')
plt.yticks(np.arange(0,110,10))
plt.xlabel('Age')
plt.ylabel('Percentage')
plt.show()

age_survival.plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Age Distribution (%) of Passengers Who Survived')
plt.yticks(np.arange(0,110,10))
plt.xlabel('Age')
plt.ylabel('Percentage')
plt.show()
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True, figsize=(10,10))
plt.subplots_adjust(hspace=0)
plt.suptitle('Age Frequency')
ax1 = sns.countplot(pd.cut(df.age, bins), data= df,
color='darkblue', ax=axes[0], saturation=0.5)
ax2 = sns.countplot(pd.cut(df.age, bins)[df.survival==0], data=df ,
color='red', ax=axes[1], saturation=1, alpha=0.5)
ax2.set_xlabel('Age')
ax3 = sns.countplot(pd.cut(df.age, bins)[df.survival==1], data= df,
color='darkblue', ax=ax2, saturation=1, alpha=0.5)
ax2.legend(['Have Not Survived', 'Have Survived'])
pd.crosstab(df['class'], df.survival).plot(kind='bar', figsize=(15,10))
plt.grid(b=True, which= 'major', linestyle='--')
plt.title('Survival Frequency for Class')
plt.yticks(np.arange(0,600,50))
plt.legend(['Not Survived', 'Survived'])
plt.xlabel('class')
plt.ylabel('Quantity')
plt.show()
pd.crosstab(df.embarked, df.survival).plot(kind='bar', figsize=(15,10))
plt.grid(b=True, which='major', linestyle='--')
plt.yticks(np.arange(0,700,50))
plt.title('Survival Frequency for Embarked')
plt.legend(['Not Survived', 'Survived'])
plt.xlabel('Embarked')
plt.ylabel('Quantity')
plt.show()
sns.pairplot(data=df, hue='survival', vars=['age', 'fare', ])
sns.countplot(x='survival', data=df)
sns.heatmap(data= df.corr(),annot=True,cmap='viridis')
sns.distplot(df.age, bins=10)
pd.crosstab(df.survival[df.embarked=='southampton'],df['class']).plot(kind='bar', figsize=(15,10))
plt.title('Survival Frequency for Class / Embarked(Southampton)')
plt.grid(b=True, which='Major', linestyle='--')
plt.legend(['First Class', 'Second Class', 'Third Class'])
plt.ylabel('Quantity')
plt.xlabel('Survival')
plt.show()
df.drop(['passanger', 'survival'], axis=1).hist(figsize=(10,10))
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
## Correlation with independent Variable (Note: Models like RF are not linear like these)
df2 = df.drop(['passanger', 'name', 'home/destination', 'survival'], axis=1)
df2.corrwith(df.survival).plot.bar(
figsize = (10, 10), title = "Correlation with Survival", fontsize = 15,
rot = 45, grid = True)
sns.set(style="white")
# Compute the correlation matrix
corr = df2.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(10, 10))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
## Pie Plots (Just for binary values)
df.columns
df2 = df[['class','survival','sex', 'embarked']]
fig = plt.figure(figsize=(15, 12))
plt.suptitle('Pie Chart Distributions', fontsize=20)
for i in range(1, df2.shape[1] + 1):
plt.subplot(6, 3, i)
f = plt.gca()
f.axes.get_yaxis().set_visible(False)
f.set_title(df2.columns.values[i - 1])
values = df2.iloc[:, i - 1].value_counts(normalize = True).values
index = df2.iloc[:, i - 1].value_counts(normalize = True).index
plt.pie(values, labels = index, autopct='%1.1f%%')
plt.axis('equal')
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
#Data analysis
statistical = df.describe()
survival = df.survival.value_counts()
countNotsurvival = len(df[df.survival == 0])
countSurvival = len(df[df.survival == 1])
print('Percentage of Titanic not survival: {:.2f}%'.format((countNotsurvival/len(df)) * 100))
print('Percentage of Titanic survival: {:.2f}%'.format((countSurvival/len(df)) * 100))
df.groupby(df['survival']).mean()
#Looking for Null Values
sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
df.isnull().any()
df.isnull().sum()
null_percentage = (df.isnull().sum()/len(df) * 100)
null_percentage = pd.DataFrame(null_percentage, columns = ['Percentage Null Values (%)'])
null_percentage
#Define X and y
X = df.drop(['enrolled','user','first_open','enrolled_date', 'enrolled_date' ], axis=1)
y = df['enrolled']
#Get Dummies
X = pd.get_dummies(X)
#Dummies Trap
X.columns
X = X.drop(['Gender_Male', 'Geography_Germany'], axis= 1)
#Taking care of missing data
'''
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3] )
'''
#Encoding categorical data
'''
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelenconder_x = LabelEncoder()
X.iloc[:, 1] = labelenconder_x.fit_transform(X.iloc[:, 1])
onehotencoder_x = OneHotEncoder(categorical_features=[1])
X2 = pd.DataFrame(onehotencoder_x.fit_transform(X).toarray())
y = pd.DataFrame(labelenconder_x.fit_transform(y))
#Dummies Trap
X2 = X2.iloc[:, 1:]
X2 = X2.iloc[:,[0,1,2]]
X2 = X2.rename(columns={1:'pay_schedule_1', 2:'pay_schedule_2', 3:'pay_schedule_3'})
X = pd.concat([X,X2], axis=1)
X = X.drop(['pay_schedule'], axis=1)
'''
#Splitting the Dataset into the training set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=0)
X_train.shape
X_test.shape
y_train.shape
y_test.shape
# Balancing the Training Set
y_train.value_counts()
pos_index = y_train[y_train.values == 1].index
neg_index = y_train[y_train.values == 0].index
if len(pos_index) > len(neg_index):
higher = pos_index
lower = neg_index
else:
higher = neg_index
lower = pos_index
np.random.seed(0)
higher = np.random.choice(higher, size=len(lower), replace=False)  # sample majority class without replacement
lower = np.asarray(lower)
new_indexes = np.concatenate((lower, higher))
X_train = X_train.loc[new_indexes]
y_train = y_train.loc[new_indexes]
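# Random undersampling: the majority class is cut down to the size of the minority
# class, e.g. 500 not-survived vs 300 survived leaves 300 rows of each (illustrative counts).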
#Feature scaling
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
X_train = pd.DataFrame(sc_x.fit_transform(X_train), columns=X.columns.values)
X_test = pd.DataFrame(sc_x.transform(X_test), columns=X.columns.values)
#Applying PCA (If Necessary)
'''
from sklearn.decomposition import PCA
pca = PCA(n_components = 2) # Before we know how many components will be kept, None is used first. Then
it is replaced by the number of components with the highest explained variance (explained_variance). In this case, 2.
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_ #Verifica as variáveis com maior variância
'''
#### Model Building ####
### Comparing Models
## Logistic Regression
from sklearn.linear_model import LogisticRegression
lr_classifier = LogisticRegression(random_state = 0, penalty = 'l2')
lr_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = lr_classifier.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
results = pd.DataFrame([['Logistic Regression (Lasso)', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
## K-Nearest Neighbors (K-NN)
#Choosing the K value
error_rate= []
for i in range(1,40):
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,color='blue', linestyle='dashed', marker='o',
markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
print(np.mean(error_rate))
from sklearn.neighbors import KNeighborsClassifier
kn_classifier = KNeighborsClassifier(n_neighbors=15, metric='minkowski', p= 2)
kn_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = kn_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['K-Nearest Neighbors (minkowski)', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
## SVM (Linear)
from sklearn.svm import SVC
svm_linear_classifier = SVC(random_state = 0, kernel = 'linear', probability= True)
svm_linear_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = svm_linear_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['SVM (Linear)', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
## SVM (rbf)
from sklearn.svm import SVC
svm_rbf_classifier = SVC(random_state = 0, kernel = 'rbf', probability= True)
svm_rbf_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = svm_rbf_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['SVM (RBF)', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
## Naive Bayes
from sklearn.naive_bayes import GaussianNB
gb_classifier = GaussianNB()
gb_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = gb_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['Naive Bayes (Gaussian)', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
## Decision Tree
from sklearn.tree import DecisionTreeClassifier
dt_classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)
dt_classifier.fit(X_train, y_train)
#Predicting the best set result
y_pred = dt_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['Decision Tree', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
from sklearn.externals.six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
dot_data = StringIO()
export_graphviz(dt_classifier, out_file=dot_data,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
graph.write_pdf('titanic.pdf')
graph.write_png('titanic.png')
## Random Forest
from sklearn.ensemble import RandomForestClassifier
rf_classifier = RandomForestClassifier(random_state = 0, n_estimators = 100,
criterion = 'gini')
rf_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = rf_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['Random Forest Gini (n=100)', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
## Ada Boosting
from sklearn.ensemble import AdaBoostClassifier
ad_classifier = AdaBoostClassifier()
ad_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = ad_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['Ada Boosting', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
##Gradient Boosting
from sklearn.ensemble import GradientBoostingClassifier
gr_classifier = GradientBoostingClassifier()
gr_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = gr_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['Gradient Boosting', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
##Xg Boosting
from xgboost import XGBClassifier
xg_classifier = XGBClassifier()
xg_classifier.fit(X_train, y_train)
# Predicting Test Set
y_pred = xg_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['Xg Boosting', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
##Ensemble Voting Classifier
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
voting_classifier = VotingClassifier(estimators= [('lr', lr_classifier),
('kn', kn_classifier),
('svc_linear', svm_linear_classifier),
('svc_rbf', svm_rbf_classifier),
('gb', gb_classifier),
('dt', dt_classifier),
('rf', rf_classifier),
('ad', ad_classifier),
('gr', gr_classifier),
('xg', xg_classifier),],
voting='soft')
for clf in (lr_classifier,kn_classifier,svm_linear_classifier,svm_rbf_classifier,
gb_classifier, dt_classifier,rf_classifier, ad_classifier, gr_classifier, xg_classifier,
voting_classifier):
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
print(clf.__class__.__name__, accuracy_score(y_test, y_pred))
# Predicting Test Set
y_pred = voting_classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['Ensemble Voting', acc, prec, rec, f1]],
columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = results.append(model_results, ignore_index = True)
#The Best Classifier
print('The best classifier is:')
print('{}'.format(results.sort_values(by='Accuracy',ascending=False).head(5)))
#Applying K-fold validation
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator=gr_classifier, X=X_train, y=y_train,cv=10)
accuracies.mean()
accuracies.std()
print("Gradient Boosting Accuracy: %0.3f (+/- %0.3f)" % (accuracies.mean(), accuracies.std() * 2))
#Plotting Cumulative Accuracy Profile (CAP)
y_pred_proba = gr_classifier.predict_proba(X=X_test)
import matplotlib.pyplot as plt
from scipy import integrate
def capcurve(y_values, y_preds_proba):
num_pos_obs = np.sum(y_values)
num_count = len(y_values)
rate_pos_obs = float(num_pos_obs) / float(num_count)
ideal = pd.DataFrame({'x':[0,rate_pos_obs,1],'y':[0,1,1]})
xx = np.arange(num_count) / float(num_count - 1)
y_cap = np.c_[y_values,y_preds_proba]
    y_cap_df_s = pd.DataFrame(data=y_cap)  # api: pandas.DataFrame
import lightgbm as lgbm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from MLFeatureSelection import sequence_selection as ss
def score(pred, real): # scoring system (thanks to herhert); returns s2
pred['index'] = np.arange(pred.shape[0]) + 1
pred['wi'] = 1 / (1 + np.log(pred['index']))
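    # wi is a rank-discounted weight: 1.0 for the top-ranked prediction, then
    # decaying as 1 / (1 + ln(rank)) for lower-ranked ones.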
    compare = pd.merge(pred, real, how='left', on='user_id')  # api: pandas.merge
import sys
import pytz
import hashlib
import numpy as np
import pandas as pd
from datetime import datetime
def edit_form_link(link_text='Submit edits'):
"""Return HTML for link to form for edits"""
return f'<a href="https://docs.google.com/forms/d/e/1FAIpQLScw8EUGIOtUj994IYEM1W7PfBGV0anXjEmz_YKiKJc4fm-tTg/viewform">{link_text}</a>'
def add_google_analytics(input_html):
"""
Return HTML with Google Analytics block added
"""
ga_block = """
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-173043454-1"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-173043454-1');
</script>
"""
output_html = input_html.replace('<!-- replace with google analytics -->', ga_block)
return output_html
def add_geojson(shape_gdf, field_name, field_value, input_html):
"""
Add a GeoJSON feature as a Javascript variable to an HTML string
This variable will be used to calculate the bounds of the map
"""
shape_row = shape_gdf[shape_gdf[field_name] == field_value].copy()
shape_geo = shape_row.geometry.iloc[0]
geo_bounds = shape_geo.boundary[0].xy
output_string = '[['
for idx, value in enumerate(geo_bounds[0]):
if idx > 0:
output_string += ','
output_string += '['
x = geo_bounds[0][idx]
output_string += '{}'.format(x)
y = geo_bounds[1][idx]
output_string += ', {}'.format(y)
output_string += ']\n'
output_string += ']]'
output_html = input_html.replace('REPLACE_WITH_XY', output_string)
return output_html
def dc_coordinates():
"""Return coordinates for a DC-wide map"""
dc_longitude = -77.016243706276569
dc_latitude = 38.894858329321485
dc_zoom_level = 10.3
return dc_longitude, dc_latitude, dc_zoom_level
def anc_names(anc_id):
"""
Return formatted ANC names
"""
ancs = pd.read_csv('data/ancs.csv')
anc_upper = 'ANC' + anc_id
anc_lower = anc_upper.lower()
anc_neighborhoods = ancs[ancs['anc_id'] == anc_id]['neighborhoods'].values[0]
return anc_upper, anc_lower, anc_neighborhoods
def assemble_divo():
"""
Return DataFrame with one row per SMD and various stats about each SMD's ranking
divo = district-votes
"""
results = pd.read_csv('data/results.csv')
districts = pd.read_csv('data/districts.csv')
votes_per_smd = pd.DataFrame(results.groupby('smd_id').votes.sum()).reset_index()
# Calculate number of SMDs in each Ward and ANC
smds_per_ward = pd.DataFrame(districts.groupby('ward').size(), columns=['smds_in_ward']).reset_index()
smds_per_anc = pd.DataFrame(districts.groupby('anc_id').size(), columns=['smds_in_anc']).reset_index()
divo = pd.merge(districts, votes_per_smd, how='inner', on='smd_id')
divo = pd.merge(divo, smds_per_ward, how='inner', on='ward')
divo = pd.merge(divo, smds_per_anc, how='inner', on='anc_id')
divo['smds_in_dc'] = len(districts)
# Rank each SMD by the number of votes recorded for ANC races within that SMD
# method = min: assigns the lowest rank when multiple rows are tied
divo['rank_dc'] = divo['votes'].rank(method='min', ascending=False)
divo['rank_ward'] = divo.groupby('ward').votes.rank(method='min', ascending=False)
divo['rank_anc'] = divo.groupby('anc_id').votes.rank(method='min', ascending=False)
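    # e.g. with method='min' and descending votes, totals [100, 100, 90] rank as [1, 1, 3]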
# Create strings showing the ranking of each SMD within its ANC, Ward, and DC-wide
divo['string_dc'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_dc'])} out of {row['smds_in_dc']} SMDs", axis=1)
divo['string_ward'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_ward'])} out of {row['smds_in_ward']} SMDs", axis=1)
divo['string_anc'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_anc'])} out of {row['smds_in_anc']} SMDs", axis=1)
average_votes_in_dc = divo.votes.mean()
average_votes_by_ward = divo.groupby('ward').votes.mean()
average_votes_by_anc = divo.groupby('anc_id').votes.mean()
return divo
def list_commissioners(status=None, date_point=None):
"""
Return dataframe with list of commissioners by status
Options:
status=None (all statuses returned) -- default
status='former'
status='current'
status='future'
date_point=None -- all statuses calculated from current DC time (default)
date_point=(some other datetime) -- all statuses calculated from that datetime
"""
commissioners = pd.read_csv('data/commissioners.csv')
if not date_point:
tz = pytz.timezone('America/New_York')
date_point = datetime.now(tz)
commissioners['start_date'] = pd.to_datetime(commissioners['start_date']).dt.tz_localize(tz='America/New_York')
commissioners['end_date'] = pd.to_datetime(commissioners['end_date']).dt.tz_localize(tz='America/New_York')
# Create combined field with start and end dates, showing ambiguity
commissioners['start_date_str'] = commissioners['start_date'].dt.strftime('%B %-d, %Y')
commissioners['end_date_str'] = commissioners['end_date'].dt.strftime('%B %-d, %Y')
# We don't have exact dates when these commissioners started, so show "circa 2019"
commissioners.loc[commissioners['start_date_str'] == 'January 2, 2019', 'start_date_str'] = '~2019'
# Combine start and end dates into one field
commissioners['term_in_office'] = commissioners['start_date_str'] + ' to ' + commissioners['end_date_str']
commissioners['is_former'] = commissioners.end_date < date_point
commissioners['is_current'] = (commissioners.start_date < date_point) & (date_point < commissioners.end_date)
commissioners['is_future'] = date_point < commissioners.start_date
# Test here that there is, at most, one "Current" and one "Future" commissioner per SMD.
# Multiple "Former" commissioners is allowed
smd_count = commissioners.groupby('smd_id')[['is_former', 'is_current', 'is_future']].sum().astype(int)
# smd_count.to_csv('smd_commissioner_count.csv')
if smd_count['is_current'].max() > 1 or smd_count['is_future'].max() > 1:
raise Exception('Too many commissioners per SMD')
if status:
commissioner_output = commissioners[commissioners['is_' + status]].copy()
else:
commissioner_output = commissioners.copy()
return commissioner_output
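# Hedged usage sketch (not part of the original script): shows the two common call
# patterns, assuming the repo's data/commissioners.csv is present; the example date is illustrative.
def _example_list_commissioners():
    current = list_commissioners(status='current')
    tz = pytz.timezone('America/New_York')
    former_as_of_2021 = list_commissioners(status='former', date_point=tz.localize(datetime(2021, 1, 4)))
    return current, former_as_of_2021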
def build_results_candidate_people():
"""
Return DataFrame containing results, candidates, and people joined
"""
people = pd.read_csv('data/people.csv')
candidates = pd.read_csv('data/candidates.csv')
results = pd.read_csv('data/results.csv')
results_candidates = pd.merge(
results #[['candidate_id', 'person_id', 'smd_id']]
, candidates #[['candidate_id']]
, how='left'
, on=['candidate_id', 'smd_id']
)
rcp = pd.merge(results_candidates, people, how='left', on='person_id') # results-candidates-people
# Determine who were incumbent candidates at the time of the election
election_date = datetime(2020, 11, 3, tzinfo=pytz.timezone('America/New_York'))
commissioners = list_commissioners(status=None)
incumbents = commissioners[(commissioners.start_date < election_date) & (election_date < commissioners.end_date)]
incumbent_candidates = pd.merge(incumbents, candidates, how='inner', on='person_id')
incumbent_candidates['is_incumbent'] = True
rcp = pd.merge(rcp, incumbent_candidates[['candidate_id', 'is_incumbent']], how='left', on='candidate_id')
rcp['is_incumbent'] = rcp['is_incumbent'].fillna(False)
    # Sort by SMD ascending, Votes descending
rcp = rcp.sort_values(by=['smd_id', 'votes'], ascending=[True, False])
# Placeholder name for all write-in candidates.
# We do not know the combination of name and vote count for write-in candidates
# We only know the name of the write-in winners
rcp['full_name'] = rcp['full_name'].fillna('Write-ins combined')
rcp['write_in_winner_int'] = rcp['write_in_winner'].astype(int)
return rcp
def build_district_comm_commelect():
"""
Build DataFrame showing commissioner and commissioner-elect for every district
"""
districts = pd.read_csv('data/districts.csv')
commissioners = list_commissioners(status=None)
people = pd.read_csv('data/people.csv')
cp = pd.merge(commissioners, people, how='inner', on='person_id')
# left join to both current commissioners and commissioners-elect
cp_current = pd.merge(districts, cp.loc[cp['is_current'], ['smd_id', 'person_id', 'full_name']], how='left', on='smd_id')
cp_current = cp_current.rename(columns={'full_name': 'current_commissioner', 'person_id': 'current_person_id'})
cp_current_future = pd.merge(cp_current, cp.loc[cp['is_future'], ['smd_id', 'person_id', 'full_name']], how='left', on='smd_id')
cp_current_future = cp_current_future.rename(columns={'full_name': 'commissioner_elect', 'person_id': 'future_person_id'})
# If there is not a current commissioner for the SMD, mark the row as "vacant"
cp_current_future['current_commissioner'] = cp_current_future['current_commissioner'].fillna('(vacant)')
return cp_current_future
def build_smd_html_table(list_of_smds, link_path=''):
"""
Return an HTML table with one row per district for a given list of SMDs
Contains current commissioner and all candidates with number of votes
"""
rcp = build_results_candidate_people()
# Bold the winners in this text field
# results_field = 'Candidates and Results (Winner in Bold)'
# rcp[results_field] = rcp.apply(
# lambda row:
# '<strong>{} ({:,.0f} votes)</strong>'.format(row['full_name'], row['votes'])
# if row['winner']
# else '{} ({:,.0f} votes)'.format(row['full_name'], row['votes'])
# , axis=1
# )
results_field = 'Candidates and Results'
rcp[results_field] = rcp.apply(
lambda row: '{} ({:,.0f} votes)'.format(row['full_name'], row['votes'])
, axis=1
)
# Aggregate results by SMD
district_results = rcp.groupby('smd_id').agg({
'votes': sum
, results_field: lambda x: ', '.join(x)
, 'write_in_winner_int': sum
})
total_votes_display_name = 'ANC Votes'
district_results[total_votes_display_name] = district_results['votes']
max_votes_for_bar_chart = district_results[total_votes_display_name].max()
district_comm_commelect = build_district_comm_commelect()
dcp_results = pd.merge(district_comm_commelect, district_results, how='left', on='smd_id')
display_df = dcp_results[dcp_results['smd_id'].isin(list_of_smds)].copy()
display_df['SMD'] = (
f'<a href="{link_path}' + display_df['smd_id'].str.replace('smd_','').str.lower() + '.html">'
+ display_df['smd_id'].str.replace('smd_','') + '</a>'
)
display_df['Current Commissioner'] = display_df['current_commissioner']
display_df['Commissioner-Elect'] = display_df['commissioner_elect']
# Append "write-in" to Commissioners-Elect who were write-in candidates
display_df.loc[display_df['write_in_winner_int'] == 1, 'Commissioner-Elect'] = display_df.loc[display_df['write_in_winner_int'] == 1, 'Commissioner-Elect'] + ' (write-in)'
columns_to_html = ['SMD', 'Current Commissioner']
css_uuid = hashlib.sha224(display_df[columns_to_html].to_string().encode()).hexdigest() + '_'
html = (
display_df[columns_to_html]
.fillna('')
.style
# .set_properties(
# subset=[results_field]
# , **{
# 'text-align': 'left'
# , 'width': '700px'
# , 'height': '45px'
# }
# )
# .set_properties(
# subset=[total_votes_display_name]
# , **{'text-align': 'left'}
# )
.set_properties(
subset=['Current Commissioner']
, **{'width': '230px', 'text-align': 'left'} # 230px fits the longest commissioner name on one row
) # why is the width in pixels so different between these columns?
# .format({total_votes_display_name: '{:,.0f}'})
# .bar(
# subset=[total_votes_display_name]
# , color='#cab2d6' # light purple
# , vmin=0
# , vmax=3116
# )
.set_uuid(css_uuid)
.hide_index()
.render()
)
return html
def build_smd_html_table_candidates(list_of_smds, link_path=''):
"""
Return an HTML table with one row per district for a given list of SMDs
Contains current commissioner and all candidates by status
"""
districts = pd.read_csv('data/districts.csv')
commissioners = list_commissioners(status='current')
people = pd.read_csv('data/people.csv')
candidates = pd.read_csv('data/candidates.csv')
candidate_statuses = pd.read_csv('data/candidate_statuses.csv')
dc = pd.merge(districts, commissioners, how='left', on='smd_id')
dcp = pd.merge(dc, people, how='left', on='person_id')
cp = pd.merge(candidates, people, how='inner', on='person_id')
cpd = pd.merge(cp, districts, how='inner', on='smd_id')
cpds = pd.merge(cpd, candidate_statuses, how='inner', on='candidate_status')
dcp['Current Commissioner'] = dcp['full_name'].fillna('(vacant)')
display_df = dcp[dcp['smd_id'].isin(list_of_smds)].copy()
display_df['SMD'] = (
f'<a href="{link_path}' + display_df['smd_id'].str.replace('smd_','').str.lower() + '.html">'
+ display_df['smd_id'].str.replace('smd_','') + '</a>'
)
# Number of candidates in each SMD
# todo: make this a function
cps = pd.merge(cp, candidate_statuses, how='inner', on='candidate_status')
# Only include active candidates
district_candidates = pd.merge(districts, cps[cps['count_as_candidate']].copy(), how='left', on='smd_id')
candidate_count = pd.DataFrame(district_candidates.groupby('smd_id')['candidate_id'].count()).reset_index()
candidate_count.rename(columns={'candidate_id': 'Number of Candidates'}, inplace=True)
display_df = pd.merge(display_df, candidate_count, how='inner', on='smd_id')
columns_to_html = ['SMD', 'Current Commissioner', 'Number of Candidates']
cpds['order_status'] = cpds['display_order'].astype(str) + ';' + cpds['candidate_status']
candidates_in_smds = cpds[cpds['smd_id'].isin(list_of_smds)].copy()
statuses_in_smds = sorted(candidates_in_smds['order_status'].unique())
for status in statuses_in_smds:
status_name = status[status.find(';')+1:]
columns_to_html += [status_name]
cs_df = candidates_in_smds[candidates_in_smds['order_status'] == status][['smd_id', 'full_name']].copy()
cs_smd = cs_df.groupby('smd_id').agg({'full_name': list}).reset_index()
cs_smd[status_name] = cs_smd['full_name'].apply(lambda row: ', '.join(row))
display_df = pd.merge(display_df, cs_smd, how='left', on='smd_id')
html = (
display_df[columns_to_html]
.fillna('')
.style
.set_uuid('smd_')
.hide_index()
.render()
)
return html
def build_district_list(smd_id_list=None, level=0):
"""
    Bulleted list of districts and current commissioners
If smd_id_list is None, all districts are returned
If smd_id_list is a list, those SMDs are returned
link level:
0: homepage
1: ANC page
2: SMD page
"""
districts = pd.read_csv('data/districts.csv')
commissioners = list_commissioners(status='current')
people = | pd.read_csv('data/people.csv') | pandas.read_csv |
from django.db import models
import pandas as pd
class Campaign(models.Model):
name = models.CharField(max_length=25)
description = models.TextField(max_length=300)
latitude = models.DecimalField(max_digits=11, decimal_places=6)
longitude = models.DecimalField(max_digits=11, decimal_places=6)
picture = models.ImageField(upload_to='pictures/')
file = models.FileField(upload_to='files/')
raw_data_path = models.CharField(max_length=100, blank=True, null=True)
raw_var_list = models.TextField(max_length=150, blank=True, null=True)
raw_dtypes = models.TextField(max_length=150, blank=True, null=True)
start_date = models.DateField(max_length=20, blank=True, null=True)
end_date = models.DateField(max_length=20, blank=True, null=True)
var1 = models.CharField(max_length=10, blank=True, null=True)
var2 = models.CharField(max_length=10, blank=True, null=True)
var_list = models.TextField(max_length=150, blank=True, null=True)
def save(self, *args, **kwargs):
df = | pd.read_csv(self.file) | pandas.read_csv |
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .context import lux
import pytest
import random
import pandas as pd
import warnings
# Suite of test that checks if data_type inferred correctly by Lux
def test_check_cars():
lux.config.set_SQL_connection("")
df = pd.read_csv("lux/data/car.csv")
df.maintain_metadata()
assert df.data_type["Name"] == "nominal"
assert df.data_type["MilesPerGal"] == "quantitative"
assert df.data_type["Cylinders"] == "nominal"
assert df.data_type["Displacement"] == "quantitative"
assert df.data_type["Horsepower"] == "quantitative"
assert df.data_type["Weight"] == "quantitative"
assert df.data_type["Acceleration"] == "quantitative"
assert df.data_type["Year"] == "temporal"
assert df.data_type["Origin"] == "nominal"
def test_check_int_id():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/instacart_sample.csv?raw=true"
)
df._ipython_display_()
inverted_data_type = lux.config.executor.invert_data_type(df.data_type)
assert len(inverted_data_type["id"]) == 3
assert (
"<code>order_id</code>, <code>product_id</code>, <code>user_id</code> is not visualized since it resembles an ID field."
in df._message.to_html()
)
def test_check_str_id():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/churn.csv?raw=true")
df._ipython_display_()
assert (
"<code>customerID</code> is not visualized since it resembles an ID field.</li>"
in df._message.to_html()
)
def test_check_hpi():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/hpi.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"HPIRank": "quantitative",
"Country": "geographical",
"SubRegion": "nominal",
"AverageLifeExpectancy": "quantitative",
"AverageWellBeing": "quantitative",
"HappyLifeYears": "quantitative",
"Footprint": "quantitative",
"InequalityOfOutcomes": "quantitative",
"InequalityAdjustedLifeExpectancy": "quantitative",
"InequalityAdjustedWellbeing": "quantitative",
"HappyPlanetIndex": "quantitative",
"GDPPerCapita": "quantitative",
"Population": "quantitative",
}
def test_check_airbnb():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/airbnb_nyc.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"id": "id",
"name": "nominal",
"host_id": "id",
"host_name": "nominal",
"neighbourhood_group": "nominal",
"neighbourhood": "nominal",
"latitude": "quantitative",
"longitude": "quantitative",
"room_type": "nominal",
"price": "quantitative",
"minimum_nights": "quantitative",
"number_of_reviews": "quantitative",
"last_review": "temporal",
"reviews_per_month": "quantitative",
"calculated_host_listings_count": "quantitative",
"availability_365": "quantitative",
}
def test_check_airports():
df = pd.read_csv(
"https://raw.githubusercontent.com/altair-viz/vega_datasets/master/vega_datasets/_data/airports.csv"
)
df.maintain_metadata()
assert df.data_type == {
"iata": "id",
"name": "nominal",
"city": "nominal",
"state": "geographical",
"country": "geographical",
"latitude": "quantitative",
"longitude": "quantitative",
}
def test_check_datetime():
df = pd.DataFrame(
{
"a": ["2020-01-01"],
"b": ["20-01-01"],
"c": ["20-jan-01"],
"d": ["20-january-01"],
"e": ["2020 January 01"],
"f": ["2020 January 01 00:00:00 pm PT"],
"g": ["2020 January 01 13:00:00"],
"h": ["2020 January 01 23:59:59 GTC-6"],
}
)
df.maintain_metadata()
assert df.data_type == {
"a": "temporal",
"b": "temporal",
"c": "temporal",
"d": "temporal",
"e": "temporal",
"f": "temporal",
"g": "temporal",
"h": "temporal",
}
def test_check_datetime_numeric_values():
car_df = | pd.read_csv("lux/data/car.csv") | pandas.read_csv |
from unittest.mock import patch
import pandas as pd
import pytest
from powersimdata.tests.mock_scenario import MockScenario
from postreise.plot.plot_bar_generation_vs_capacity import (
_get_bar_display_val,
make_gen_cap_custom_data,
plot_bar_generation_vs_capacity,
)
mock_plant = {
"plant_id": ["A", "B", "C", "D", "E", "F", "G", "H"],
"zone_id": [301, 302, 303, 304, 305, 306, 307, 308],
"Pmax": [100, 75, 150, 30, 50, 300, 200, 80],
"Pmin": [0, 0, 0, 0, 0, 100, 10, 0],
"type": ["solar", "wind", "solar", "wind", "wind", "solar", "solar", "wind"],
"zone_name": [
"Far West",
"North",
"West",
"South",
"North Central",
"South Central",
"Coast",
"East",
],
}
mock_solar = pd.DataFrame(
{
"A": [95, 95, 96, 94],
"C": [140, 135, 136, 144],
"F": [299, 298, 299, 298],
"G": [195, 195, 193, 199],
},
index=pd.date_range(start="2016-01-01", periods=4, freq="H"),
)
mock_wind = pd.DataFrame(
{
"B": [70, 71, 70, 72],
"D": [29, 29, 29, 29],
"E": [40, 39, 38, 41],
"H": [71, 74, 68, 69],
},
index=pd.date_range(start="2016-01-01", periods=4, freq="H"),
)
mock_pg = pd.DataFrame(
{
"A": [80, 75, 72, 81],
"B": [22, 22, 25, 20],
"C": [130, 130, 130, 130],
"D": [25, 26, 27, 28],
"E": [10, 11, 9, 12],
"F": [290, 295, 295, 294],
"G": [190, 190, 191, 190],
"H": [61, 63, 65, 67],
},
index=pd.date_range(start="2016-01-01", periods=4, freq="H"),
)
mock_demand = pd.DataFrame(
{
301: [1100, 1102, 1103, 1104],
302: [2344, 2343, 2342, 2341],
303: [3875, 3876, 3877, 3878],
304: [4924, 4923, 4922, 4921],
305: [400, 300, 200, 100],
306: [5004, 5003, 5002, 5001],
307: [2504, 2503, 2502, 2501],
308: [3604, 3603, 3602, 1],
},
index= | pd.date_range(start="2016-01-01", periods=4, freq="H") | pandas.date_range |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalTest(PandasOnSparkTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(
["b", "a", "c", "c", "b", "a"], categories=["c", "b", "d", "a"]
),
},
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
return self.pdf, self.psdf
def test_categorical_frame(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b, pdf.b)
self.assert_eq(psdf.index, pdf.index)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
psser = ps.Series([1, 2, 3], dtype="category")
self.assert_eq(psser, pser)
self.assert_eq(psser.cat.categories, pser.cat.categories)
self.assert_eq(psser.cat.codes, pser.cat.codes)
self.assert_eq(psser.cat.ordered, pser.cat.ordered)
def test_categories_setter(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pser.cat.categories = ["z", "y", "x"]
psser.cat.categories = ["z", "y", "x"]
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
with self.assertRaises(ValueError):
psser.cat.categories = [1, 2, 3, 4]
def test_add_categories(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
self.assert_eq(pser.cat.add_categories(4), psser.cat.add_categories(4))
self.assert_eq(pser.cat.add_categories([4, 5]), psser.cat.add_categories([4, 5]))
self.assert_eq(pser.cat.add_categories([]), psser.cat.add_categories([]))
pser.cat.add_categories(4, inplace=True)
psser.cat.add_categories(4, inplace=True)
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
self.assertRaises(ValueError, lambda: psser.cat.add_categories(4))
self.assertRaises(ValueError, lambda: psser.cat.add_categories([5, 5]))
def test_remove_categories(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
self.assert_eq(pser.cat.remove_categories(2), psser.cat.remove_categories(2))
self.assert_eq(pser.cat.remove_categories([1, 3]), psser.cat.remove_categories([1, 3]))
self.assert_eq(pser.cat.remove_categories([]), psser.cat.remove_categories([]))
self.assert_eq(pser.cat.remove_categories([2, 2]), psser.cat.remove_categories([2, 2]))
self.assert_eq(
pser.cat.remove_categories([1, 2, 3]), psser.cat.remove_categories([1, 2, 3])
)
self.assert_eq(pser.cat.remove_categories(None), psser.cat.remove_categories(None))
self.assert_eq(pser.cat.remove_categories([None]), psser.cat.remove_categories([None]))
pser.cat.remove_categories(2, inplace=True)
psser.cat.remove_categories(2, inplace=True)
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
self.assertRaises(ValueError, lambda: psser.cat.remove_categories(4))
self.assertRaises(ValueError, lambda: psser.cat.remove_categories([4, None]))
def test_remove_unused_categories(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
self.assert_eq(pser.cat.remove_unused_categories(), psser.cat.remove_unused_categories())
pser.cat.add_categories(4, inplace=True)
pser.cat.remove_categories(2, inplace=True)
psser.cat.add_categories(4, inplace=True)
psser.cat.remove_categories(2, inplace=True)
self.assert_eq(pser.cat.remove_unused_categories(), psser.cat.remove_unused_categories())
pser.cat.remove_unused_categories(inplace=True)
psser.cat.remove_unused_categories(inplace=True)
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
def test_as_ordered_unordered(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
# as_ordered
self.assert_eq(pser.cat.as_ordered(), psser.cat.as_ordered())
pser.cat.as_ordered(inplace=True)
psser.cat.as_ordered(inplace=True)
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
# as_unordered
self.assert_eq(pser.cat.as_unordered(), psser.cat.as_unordered())
pser.cat.as_unordered(inplace=True)
psser.cat.as_unordered(inplace=True)
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(psser.astype("category"), pser.astype("category"))
self.assert_eq(
psser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
kcser = psser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcser.astype("category"), pcser.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pser.astype(CategoricalDtype(["b", "c", "a"])),
)
self.assert_eq(kcser.astype(str), pcser.astype(str))
def test_factorize(self):
pser = pd.Series(["a", "b", "c", None], dtype=CategoricalDtype(["c", "a", "d", "b"]))
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pser.factorize(na_sentinel=-2)
kcodes, kuniques = psser.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_frame_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.apply(lambda x: x).sort_index(), pdf.apply(lambda x: x).sort_index())
self.assert_eq(
psdf.apply(lambda x: x, axis=1).sort_index(),
pdf.apply(lambda x: x, axis=1).sort_index(),
)
def test_frame_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply()
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c"])
def categorize(ser) -> ps.Series[dtype]:
return ser.astype(dtype)
self.assert_eq(
psdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
pdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.transform(lambda x: x), pdf.transform(lambda x: x))
self.assert_eq(psdf.transform(lambda x: x.cat.codes), pdf.transform(lambda x: x.cat.codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.transform(lambda x: x.astype(dtype)).sort_index(),
pdf.transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_frame_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform()
pdf, psdf = self.df_pair
def codes(pser) -> ps.Series[np.int8]:
return pser.cat.codes
self.assert_eq(psdf.transform(codes), pdf.transform(codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.transform(to_category).sort_index(), pdf.transform(to_category).sort_index()
)
def test_series_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.apply(lambda x: x).sort_index(), pdf.a.apply(lambda x: x).sort_index()
)
def test_series_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_apply()
pdf, psdf = self.df_pair
ret = psdf.a.dtype
def identity(pser) -> ret:
return pser
self.assert_eq(psdf.a.apply(identity).sort_index(), pdf.a.apply(identity).sort_index())
# TODO: The return type is still category.
# def to_str(x) -> str:
# return str(x)
#
# self.assert_eq(
# psdf.a.apply(to_str).sort_index(), pdf.a.apply(to_str).sort_index()
# )
def test_groupby_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").apply(lambda df: df).sort_index(),
pdf.groupby("a").apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
pdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
pdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
pdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
)
# TODO: grouping by a categorical type sometimes preserves unused categories.
# self.assert_eq(
# psdf.groupby("a").apply(len).sort_index(), pdf.groupby("a").apply(len).sort_index(),
# )
def test_groupby_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_apply()
pdf, psdf = self.df_pair
def identity(df) -> ps.DataFrame[zip(psdf.columns, psdf.dtypes)]:
return df
self.assert_eq(
psdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
pdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_groupby_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").transform(lambda x: x).sort_index(),
pdf.groupby("a").transform(lambda x: x).sort_index(),
)
dtype = | CategoricalDtype(categories=["a", "b", "c", "d"]) | pandas.api.types.CategoricalDtype |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scheduler.GOBI import GOBIScheduler
plt.style.use(['science'])
plt.rcParams["text.usetex"] = False
class Stats():
def __init__(self, Environment, WorkloadModel, Datacenter, Scheduler):
self.env = Environment
self.env.stats = self
self.workload = WorkloadModel
self.datacenter = Datacenter
self.scheduler = Scheduler
self.simulated_scheduler = GOBIScheduler('energy_latency_'+str(self.datacenter.num_hosts))
self.simulated_scheduler.env = self.env
self.initStats()
def initStats(self):
self.hostinfo = []
self.workloadinfo = []
self.activecontainerinfo = []
self.allcontainerinfo = []
self.metrics = []
self.schedulerinfo = []
def saveHostInfo(self):
hostinfo = dict()
hostinfo['interval'] = self.env.interval
hostinfo['cpu'] = [host.getCPU() for host in self.env.hostlist]
hostinfo['numcontainers'] = [len(self.env.getContainersOfHost(i)) for i,host in enumerate(self.env.hostlist)]
hostinfo['power'] = [host.getPower() for host in self.env.hostlist]
hostinfo['baseips'] = [host.getBaseIPS() for host in self.env.hostlist]
hostinfo['ipsavailable'] = [host.getIPSAvailable() for host in self.env.hostlist]
hostinfo['ipscap'] = [host.ipsCap for host in self.env.hostlist]
hostinfo['apparentips'] = [host.getApparentIPS() for host in self.env.hostlist]
hostinfo['ram'] = [host.getCurrentRAM() for host in self.env.hostlist]
hostinfo['ramavailable'] = [host.getRAMAvailable() for host in self.env.hostlist]
hostinfo['disk'] = [host.getCurrentDisk() for host in self.env.hostlist]
hostinfo['diskavailable'] = [host.getDiskAvailable() for host in self.env.hostlist]
self.hostinfo.append(hostinfo)
def saveWorkloadInfo(self, deployed, migrations):
workloadinfo = dict()
workloadinfo['interval'] = self.env.interval
workloadinfo['totalcontainers'] = len(self.workload.createdContainers)
if self.workloadinfo:
workloadinfo['newcontainers'] = workloadinfo['totalcontainers'] - self.workloadinfo[-1]['totalcontainers']
else:
workloadinfo['newcontainers'] = workloadinfo['totalcontainers']
workloadinfo['deployed'] = len(deployed)
workloadinfo['migrations'] = len(migrations)
workloadinfo['inqueue'] = len(self.workload.getUndeployedContainers())
self.workloadinfo.append(workloadinfo)
def saveContainerInfo(self):
containerinfo = dict()
containerinfo['interval'] = self.env.interval
containerinfo['activecontainers'] = self.env.getNumActiveContainers()
containerinfo['ips'] = [(c.getBaseIPS() if c else 0) for c in self.env.containerlist]
containerinfo['apparentips'] = [(c.getApparentIPS() if c else 0) for c in self.env.containerlist]
containerinfo['ram'] = [(c.getRAM() if c else 0) for c in self.env.containerlist]
containerinfo['disk'] = [(c.getDisk() if c else 0) for c in self.env.containerlist]
containerinfo['creationids'] = [(c.creationID if c else -1) for c in self.env.containerlist]
containerinfo['hostalloc'] = [(c.getHostID() if c else -1) for c in self.env.containerlist]
containerinfo['active'] = [(c.active if c else False) for c in self.env.containerlist]
self.activecontainerinfo.append(containerinfo)
def saveAllContainerInfo(self):
containerinfo = dict()
allCreatedContainers = [self.env.getContainerByCID(cid) for cid in list(np.where(self.workload.deployedContainers)[0])]
containerinfo['interval'] = self.env.interval
if self.datacenter.__class__.__name__ == 'Datacenter':
containerinfo['application'] = [self.env.getContainerByCID(cid).application for cid in list(np.where(self.workload.deployedContainers)[0])]
containerinfo['ips'] = [(c.getBaseIPS() if c.active else 0) for c in allCreatedContainers]
containerinfo['create'] = [(c.createAt) for c in allCreatedContainers]
containerinfo['start'] = [(c.startAt) for c in allCreatedContainers]
containerinfo['destroy'] = [(c.destroyAt) for c in allCreatedContainers]
containerinfo['apparentips'] = [(c.getApparentIPS() if c.active else 0) for c in allCreatedContainers]
containerinfo['ram'] = [(c.getRAM() if c.active else 0) for c in allCreatedContainers]
containerinfo['disk'] = [(c.getDisk() if c.active else 0) for c in allCreatedContainers]
containerinfo['hostalloc'] = [(c.getHostID() if c.active else -1) for c in allCreatedContainers]
containerinfo['active'] = [(c.active) for c in allCreatedContainers]
self.allcontainerinfo.append(containerinfo)
def saveMetrics(self, destroyed, migrations):
metrics = dict()
metrics['interval'] = self.env.interval
metrics['numdestroyed'] = len(destroyed)
metrics['nummigrations'] = len(migrations)
metrics['energy'] = [host.getPower()*self.env.intervaltime for host in self.env.hostlist]
metrics['energytotalinterval'] = np.sum(metrics['energy'])
metrics['energypercontainerinterval'] = np.sum(metrics['energy'])/self.env.getNumActiveContainers()
metrics['responsetime'] = [c.totalExecTime + c.totalMigrationTime for c in destroyed]
metrics['avgresponsetime'] = np.average(metrics['responsetime']) if len(destroyed) > 0 else 0
metrics['migrationtime'] = [c.totalMigrationTime for c in destroyed]
metrics['avgmigrationtime'] = np.average(metrics['migrationtime']) if len(destroyed) > 0 else 0
        metrics['slaviolations'] = len(np.where([c.destroyAt > c.sla for c in destroyed])[0])  # [0] selects the match array so this counts violations, not array dimensions
metrics['slaviolationspercentage'] = metrics['slaviolations'] * 100.0 / len(destroyed) if len(destroyed) > 0 else 0
metrics['waittime'] = [c.startAt - c.createAt for c in destroyed]
metrics['energytotalinterval_pred'], metrics['avgresponsetime_pred'] = self.runSimulationGOBI()
self.metrics.append(metrics)
def saveSchedulerInfo(self, selectedcontainers, decision, schedulingtime):
schedulerinfo = dict()
schedulerinfo['interval'] = self.env.interval
schedulerinfo['selection'] = selectedcontainers
schedulerinfo['decision'] = decision
schedulerinfo['schedule'] = [(c.id, c.getHostID()) if c else (None, None) for c in self.env.containerlist]
schedulerinfo['schedulingtime'] = schedulingtime
if self.datacenter.__class__.__name__ == 'Datacenter':
schedulerinfo['migrationTime'] = self.env.intervalAllocTimings[-1]
self.schedulerinfo.append(schedulerinfo)
def saveStats(self, deployed, migrations, destroyed, selectedcontainers, decision, schedulingtime):
self.saveHostInfo()
self.saveWorkloadInfo(deployed, migrations)
self.saveContainerInfo()
self.saveAllContainerInfo()
self.saveMetrics(destroyed, migrations)
self.saveSchedulerInfo(selectedcontainers, decision, schedulingtime)
def runSimpleSimulation(self, decision):
host_alloc = []; container_alloc = [-1] * len(self.env.hostlist)
for i in range(len(self.env.hostlist)):
host_alloc.append([])
for c in self.env.containerlist:
if c and c.getHostID() != -1:
host_alloc[c.getHostID()].append(c.id)
container_alloc[c.id] = c.getHostID()
decision = self.simulated_scheduler.filter_placement(decision)
for cid, hid in decision:
if self.env.getPlacementPossible(cid, hid) and container_alloc[cid] != -1:
host_alloc[container_alloc[cid]].remove(cid)
host_alloc[hid].append(cid)
energytotalinterval_pred = 0
for hid, cids in enumerate(host_alloc):
ips = 0
for cid in cids: ips += self.env.containerlist[cid].getApparentIPS()
energytotalinterval_pred += self.env.hostlist[hid].getPowerFromIPS(ips)
return energytotalinterval_pred*self.env.intervaltime, max(0, np.mean([metric_d['avgresponsetime'] for metric_d in self.metrics[-5:]]))
def runSimulationGOBI(self):
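        # Re-runs selection + placement with the simulated GOBI scheduler on a copy of the
        # current container-to-host allocation, applies only the feasible migrations, then
        # sums host power at the resulting IPS loads to predict next-interval energy and
        # reuses the mean of recent response times as the latency estimate.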
host_alloc = []; container_alloc = [-1] * len(self.env.hostlist)
for i in range(len(self.env.hostlist)):
host_alloc.append([])
for c in self.env.containerlist:
if c and c.getHostID() != -1:
host_alloc[c.getHostID()].append(c.id)
container_alloc[c.id] = c.getHostID()
selected = self.simulated_scheduler.selection()
decision = self.simulated_scheduler.filter_placement(self.simulated_scheduler.placement(selected))
for cid, hid in decision:
if self.env.getPlacementPossible(cid, hid) and container_alloc[cid] != -1:
host_alloc[container_alloc[cid]].remove(cid)
host_alloc[hid].append(cid)
energytotalinterval_pred = 0
for hid, cids in enumerate(host_alloc):
ips = 0
for cid in cids: ips += self.env.containerlist[cid].getApparentIPS()
energytotalinterval_pred += self.env.hostlist[hid].getPowerFromIPS(ips)
return energytotalinterval_pred*self.env.intervaltime, max(0, np.mean([metric_d['avgresponsetime'] for metric_d in self.metrics[-5:]]))
########################################################################################################
def generateGraphsWithInterval(self, dirname, listinfo, obj, metric, metric2=None):
fig, axes = plt.subplots(len(listinfo[0][metric]), 1, sharex=True, figsize=(4, 0.5*len(listinfo[0][metric])))
title = obj + '_' + metric + '_with_interval'
totalIntervals = len(listinfo)
x = list(range(totalIntervals))
metric_with_interval = []; metric2_with_interval = []
ylimit = 0; ylimit2 = 0
for hostID in range(len(listinfo[0][metric])):
metric_with_interval.append([listinfo[interval][metric][hostID] for interval in range(totalIntervals)])
ylimit = max(ylimit, max(metric_with_interval[-1]))
if metric2:
metric2_with_interval.append([listinfo[interval][metric2][hostID] for interval in range(totalIntervals)])
ylimit2 = max(ylimit2, max(metric2_with_interval[-1]))
for hostID in range(len(listinfo[0][metric])):
axes[hostID].set_ylim(0, max(ylimit, ylimit2))
axes[hostID].plot(x, metric_with_interval[hostID])
if metric2:
axes[hostID].plot(x, metric2_with_interval[hostID])
axes[hostID].set_ylabel(obj[0].capitalize()+" "+str(hostID))
axes[hostID].grid(b=True, which='both', color='#eeeeee', linestyle='-')
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + title + '.pdf')
def generateMetricsWithInterval(self, dirname):
fig, axes = plt.subplots(9, 1, sharex=True, figsize=(4, 5))
x = list(range(len(self.metrics)))
res = {}
for i,metric in enumerate(['numdestroyed', 'nummigrations', 'energytotalinterval', 'avgresponsetime',\
'avgmigrationtime', 'slaviolations', 'slaviolationspercentage', 'waittime', 'energypercontainerinterval']):
metric_with_interval = [self.metrics[i][metric] for i in range(len(self.metrics))] if metric != 'waittime' else \
[sum(self.metrics[i][metric]) for i in range(len(self.metrics))]
axes[i].plot(x, metric_with_interval)
axes[i].set_ylabel(metric, fontsize=5)
axes[i].grid(b=True, which='both', color='#eeeeee', linestyle='-')
res[metric] = sum(metric_with_interval)
print("Summation ", metric, " = ", res[metric])
print('Average energy (sum energy interval / sum numdestroyed) = ', res['energytotalinterval']/res['numdestroyed'])
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + 'Metrics' + '.pdf')
def generateWorkloadWithInterval(self, dirname):
fig, axes = plt.subplots(5, 1, sharex=True, figsize=(4, 5))
x = list(range(len(self.workloadinfo)))
for i, metric in enumerate(['totalcontainers', 'newcontainers', 'deployed', 'migrations', 'inqueue']):
metric_with_interval = [self.workloadinfo[i][metric] for i in range(len(self.workloadinfo))]
axes[i].plot(x, metric_with_interval)
axes[i].set_ylabel(metric)
axes[i].grid(b=True, which='both', color='#eeeeee', linestyle='-')
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + 'Workload' + '.pdf')
########################################################################################################
def generateCompleteDataset(self, dirname, data, name):
title = name + '_with_interval'
metric_with_interval = []
headers = list(data[0].keys())
for datum in data:
metric_with_interval.append([datum[value] for value in datum.keys()])
df = pd.DataFrame(metric_with_interval, columns=headers)
df.to_csv(dirname + '/' + title + '.csv', index=False)
def generateDatasetWithInterval(self, dirname, metric, objfunc, metric2=None, objfunc2=None):
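        # Builds one CSV row per interval: host-level 'metric' values, optional container-level
        # 'metric2' values, the container-to-host allocation, and the next interval's objective value(s).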
title = metric + '_' + (metric2 + '_' if metric2 else "") + (objfunc + '_' if objfunc else "") + (objfunc2 + '_' if objfunc2 else "") + 'with_interval'
totalIntervals = len(self.hostinfo)
metric_with_interval = []; metric2_with_interval = [] # metric1 is of host and metric2 is of containers
host_alloc_with_interval = []; objfunc2_with_interval = []
objfunc_with_interval = []
for interval in range(totalIntervals-1):
metric_with_interval.append([self.hostinfo[interval][metric][hostID] for hostID in range(len(self.hostinfo[0][metric]))])
host_alloc_with_interval.append([self.activecontainerinfo[interval]['hostalloc'][cID] for cID in range(len(self.activecontainerinfo[0]['hostalloc']))])
objfunc_with_interval.append(self.metrics[interval+1][objfunc])
if metric2:
metric2_with_interval.append(self.activecontainerinfo[interval][metric2])
if objfunc2:
objfunc2_with_interval.append(self.metrics[interval+1][objfunc2])
df = pd.DataFrame(metric_with_interval)
if metric2: df = pd.concat([df, pd.DataFrame(metric2_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(host_alloc_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(objfunc_with_interval)], axis=1)
if objfunc2: df = pd.concat([df, pd.DataFrame(objfunc2_with_interval)], axis=1)
df.to_csv(dirname + '/' + title + '.csv' , header=False, index=False)
def generateDatasetWithInterval2(self, dirname, metric, metric2, metric3, metric4, objfunc, objfunc2):
title = metric + '_' + metric2 + '_' + metric3 + '_' + metric4 + '_' +objfunc + '_' + objfunc2 + '_' + 'with_interval'
totalIntervals = len(self.hostinfo)
metric_with_interval = []; metric2_with_interval = []
metric3_with_interval = []; metric4_with_interval = []
host_alloc_with_interval = []; objfunc2_with_interval = []
objfunc_with_interval = []
for interval in range(totalIntervals-1):
metric_with_interval.append([self.hostinfo[interval][metric][hostID] for hostID in range(len(self.hostinfo[0][metric]))])
host_alloc_with_interval.append([self.activecontainerinfo[interval]['hostalloc'][cID] for cID in range(len(self.activecontainerinfo[0]['hostalloc']))])
objfunc_with_interval.append(self.metrics[interval+1][objfunc])
metric2_with_interval.append(self.activecontainerinfo[interval][metric2])
metric3_with_interval.append(self.metrics[interval][metric3])
metric4_with_interval.append(self.metrics[interval][metric4])
objfunc2_with_interval.append(self.metrics[interval+1][objfunc2])
df = pd.DataFrame(metric_with_interval)
df = pd.concat([df, pd.DataFrame(metric2_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(host_alloc_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(metric3_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(metric4_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(objfunc_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(objfunc2_with_interval)], axis=1)
df.to_csv(dirname + '/' + title + '.csv' , header=False, index=False)
def generateGraphs(self, dirname):
self.generateGraphsWithInterval(dirname, self.hostinfo, 'host', 'cpu')
self.generateGraphsWithInterval(dirname, self.hostinfo, 'host', 'numcontainers')
self.generateGraphsWithInterval(dirname, self.hostinfo, 'host', 'power')
self.generateGraphsWithInterval(dirname, self.hostinfo, 'host', 'baseips', 'apparentips')
self.generateGraphsWithInterval(dirname, self.hostinfo, 'host', 'ipscap', 'apparentips')
self.generateGraphsWithInterval(dirname, self.activecontainerinfo, 'container', 'ips', 'apparentips')
self.generateGraphsWithInterval(dirname, self.activecontainerinfo, 'container', 'hostalloc')
self.generateMetricsWithInterval(dirname)
self.generateWorkloadWithInterval(dirname)
def generateDatasets(self, dirname):
# self.generateDatasetWithInterval(dirname, 'cpu', objfunc='energytotalinterval')
self.generateDatasetWithInterval(dirname, 'cpu', metric2='apparentips', objfunc='energytotalinterval', objfunc2='avgresponsetime')
self.generateDatasetWithInterval2(dirname, 'cpu', 'apparentips', 'energytotalinterval_pred', 'avgresponsetime_pred', objfunc='energytotalinterval', objfunc2='avgresponsetime')
def generateCompleteDatasets(self, dirname):
self.generateCompleteDataset(dirname, self.hostinfo, 'hostinfo')
self.generateCompleteDataset(dirname, self.workloadinfo, 'workloadinfo')
self.generateCompleteDataset(dirname, self.metrics, 'metrics')
self.generateCompleteDataset(dirname, self.activecontainerinfo, 'activecontainerinfo')
self.generateCompleteDataset(dirname, self.allcontainerinfo, 'allcontainerinfo')
self.generateCompleteDataset(dirname, self.schedulerinfo, 'schedulerinfo')
def generateSchedulerMetricsDataOutput(self):
filepath = 'logs/' + self.datacenter.__class__.__name__ + '/' + self.scheduler.__class__.__name__ + '.csv'
# Create metrics data
metrics_keys = ['interval', 'numdestroyed', 'nummigrations', 'energytotalinterval', 'slaviolations', 'waittime',
'avgmigrationtime', 'avgresponsetime', 'slaviolationspercentage', 'energypercontainerinterval']
metric_with_interval = []
cols_metrics = list(metrics_keys)
for datum in self.metrics:
metric_with_interval.append([datum[value] for value in metrics_keys])
df_metrics = pd.DataFrame(metric_with_interval, columns=cols_metrics)
# Create column headers for each container
cols_containers = []
for i in range(50):
cols_containers.append('W' + str(i) + ' (ips, ram, disk, sla)')
# Create ips data per container
ips_with_interval = []
for interval in range(len(self.activecontainerinfo)):
ips_with_interval.append(self.activecontainerinfo[interval]['ips'])
df_ips = | pd.DataFrame(ips_with_interval, columns=cols_containers) | pandas.DataFrame |
from mlxtend.frequent_patterns import fpgrowth
import pandas as pd
import time
import cProfile
def get_closed_itemsets(baskets, threshold):
print('========== Collecting Closed Itemsets ==========')
# Each itemset has minimum possible support 'threshold', assuming it appears in the database
print(f'Finding all frequent itemsets with support above: {threshold}')
start_time = time.time()
itemsets = fpgrowth(baskets, min_support=threshold, use_colnames=True)
    print(f'Time to run fpgrowth with min_sup {threshold}: {time.time() - start_time}')
su = itemsets.support.unique()
fredic = {}
for i in range(len(su)):
inset = list(itemsets.loc[itemsets.support == su[i]]['itemsets'])
fredic[su[i]] = inset
start_time = time.time()
cl = []
print(itemsets.shape)
for index, row in itemsets.iterrows():
isclose = True
cli = row['itemsets']
cls = row['support']
checkset = fredic[cls]
for i in checkset:
if (cli != i):
if (frozenset.issubset(cli, i)):
isclose = False
break
if isclose:
cl.append((row['itemsets'], row['support']))
print("Stage 1 done", len(cl))
closed_itemset_dict = dict()
for c, s in cl:
# c = frozenset([int(c_i) for c_i in c])
closed_itemset_dict[c] = s
print(f'Time to find closed itemsets: {time.time() - start_time}')
print(f'{itemsets.shape[0]} itemsets reduced to {len(cl)} closed itemsets')
print('================================================\n')
return closed_itemset_dict, itemsets
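# Hedged usage sketch (illustrative, not from the original project): transactions are
# assumed to be one-hot encoded as a boolean DataFrame, which is the input format
# fpgrowth expects; the item names and threshold below are made up.
def _example_get_closed_itemsets():
    demo_baskets = pd.DataFrame(
        [[True, True, False],
         [True, True, True],
         [True, False, True]],
        columns=['bread', 'milk', 'butter'])
    # keeps every itemset with support >= 1/3, then reduces it to the closed ones
    closed, all_itemsets = get_closed_itemsets(demo_baskets, threshold=1/3)
    return closed, all_itemsets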
def itemsets_from_closed_itemsets(closed_itemsets, possible_itemsets):
# profile = cProfile.Profile()
# profile.enable()
supports = []
for itemset in possible_itemsets:
max_supp = 0
for closed_itemset, supp in closed_itemsets.items():
# closed_itemset = frozenset([str(c_i) for c_i in closed_itemset])
if itemset <= closed_itemset:
max_supp = max(max_supp, supp)
supports.append(max_supp)
df = | pd.DataFrame(data={'support': supports, 'itemsets': possible_itemsets}) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fbprophet import Prophet
from scipy import optimize, stats
from sklearn import preprocessing, svm
import statsmodels.api as sm
import pmdarima
import statsmodels.tsa.api as smt
import arch
## for deep learning
#from tensorflow.keras import models, layers, preprocessing as kprocessing
pd.plotting.register_matplotlib_converters()
###############################################################################
# TS ANALYSIS #
###############################################################################
'''
Plot ts with rolling mean and 95% confidence interval with rolling std.
:parameter
:param ts: pandas Series
:param window: num for rolling stats
'''
def plot_ts(ts, plot_ma=True, plot_intervals=True, window=30, figsize=(15,5)):
rolling_mean = ts.rolling(window=window).mean()
rolling_std = ts.rolling(window=window).std()
plt.figure(figsize=figsize)
plt.title(ts.name)
plt.plot(ts[window:], label='Actual values', color="black")
if plot_ma:
plt.plot(rolling_mean, 'g', label='MA'+str(window), color="red")
if plot_intervals:
lower_bound = rolling_mean - (1.96 * rolling_std)
upper_bound = rolling_mean + (1.96 * rolling_std)
plt.fill_between(x=ts.index, y1=lower_bound, y2=upper_bound, color='lightskyblue', alpha=0.4)
plt.legend(loc='best')
plt.grid(True)
plt.show()
'''
Test stationarity by:
- running Augmented Dickey-Fuller test wiht 95%
- plotting mean and variance of a sample from data
- plottig autocorrelation and partial autocorrelation
'''
def test_stationarity_acf_pacf(ts, sample=0.20, maxlag=30, figsize=(15,10)):
with plt.style.context(style='bmh'):
## set figure
fig = plt.figure(figsize=figsize)
ts_ax = plt.subplot2grid(shape=(2,2), loc=(0,0), colspan=2)
pacf_ax = plt.subplot2grid(shape=(2,2), loc=(1,0))
acf_ax = plt.subplot2grid(shape=(2,2), loc=(1,1))
## plot ts with mean/std of a sample from the first x%
dtf_ts = ts.to_frame(name="ts")
sample_size = int(len(ts)*sample)
dtf_ts["mean"] = dtf_ts["ts"].head(sample_size).mean()
dtf_ts["lower"] = dtf_ts["ts"].head(sample_size).mean() + dtf_ts["ts"].head(sample_size).std()
dtf_ts["upper"] = dtf_ts["ts"].head(sample_size).mean() - dtf_ts["ts"].head(sample_size).std()
dtf_ts["ts"].plot(ax=ts_ax, color="black", legend=False)
dtf_ts["mean"].plot(ax=ts_ax, legend=False, color="red", linestyle="--", linewidth=0.7)
ts_ax.fill_between(x=dtf_ts.index, y1=dtf_ts['lower'], y2=dtf_ts['upper'], color='lightskyblue', alpha=0.4)
dtf_ts["mean"].head(sample_size).plot(ax=ts_ax, legend=False, color="red", linewidth=0.9)
ts_ax.fill_between(x=dtf_ts.head(sample_size).index, y1=dtf_ts['lower'].head(sample_size), y2=dtf_ts['upper'].head(sample_size), color='lightskyblue')
## test stationarity (Augmented Dickey-Fuller)
adfuller_test = sm.tsa.stattools.adfuller(ts, maxlag=maxlag, autolag="AIC")
adf, p, critical_value = adfuller_test[0], adfuller_test[1], adfuller_test[4]["5%"]
p = round(p, 3)
conclusion = "Stationary" if p < 0.05 else "Non-Stationary"
ts_ax.set_title('Dickey-Fuller Test 95%: '+conclusion+' (p-value: '+str(p)+')')
## pacf (for AR) e acf (for MA)
smt.graphics.plot_pacf(ts, lags=maxlag, ax=pacf_ax, title="Partial Autocorrelation (for AR component)")
smt.graphics.plot_acf(ts, lags=maxlag, ax=acf_ax, title="Autocorrelation (for MA component)")
plt.tight_layout()
'''
Difference the ts (lag differencing).
:parameter
    :param ts: pandas Series
    :param lag: num - diff[t] = y[t] - y[t-lag]
    :param order: num - how many times it has to differentiate: diff[t]^order = diff[t] - diff[t-lag]
:param drop_na: logic - if True Na are dropped, else are filled with last observation
'''
def diff_ts(ts, lag=1, order=1, drop_na=True):
for i in range(order):
ts = ts - ts.shift(lag)
ts = ts[(pd.notnull(ts))] if drop_na is True else ts.fillna(method="bfill")
return ts
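# Hedged usage sketch (illustrative values): first-order differencing with lag=1 turns
# levels into step-to-step changes; a weekly-seasonal daily series would use lag=7 instead.
def _example_diff_ts():
    ts = pd.Series([10.0, 12.0, 15.0, 19.0],
                   index=pd.date_range("2020-01-01", periods=4, freq="D"))
    return diff_ts(ts, lag=1, order=1, drop_na=True)   # values: [2.0, 3.0, 4.0]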
'''
Undo the differencing of a ts (inverse of diff_ts): rebuild the levels by cumulatively
summing the differences on top of the first original value. Minimal reconstruction for
lag=1, order=1; higher orders would also need the intermediate first values.
'''
def undo_diff(ts, first_y, lag=1, order=1):
    for i in range(order):
        ts = first_y + ts.cumsum()
    return ts
'''
Run Granger test on 2 series
'''
def test_2ts_casuality(ts1, ts2, maxlag=30, figsize=(15,5)):
## prepare
dtf = ts1.to_frame(name=ts1.name)
dtf[ts2.name] = ts2
dtf.plot(figsize=figsize, grid=True, title=ts1.name+" vs "+ts2.name)
plt.show()
    ## test causality (Granger test)
granger_test = sm.tsa.stattools.grangercausalitytests(dtf, maxlag=maxlag, verbose=False)
for lag,tupla in granger_test.items():
p = np.mean([tupla[0][k][1] for k in tupla[0].keys()])
p = round(p, 3)
if p < 0.05:
conclusion = "Casuality with lag "+str(lag)+" (p-value: "+str(p)+")"
print(conclusion)
'''
Decompose ts into
- trend component = moving avarage
- seasonality
- residuals = y - (trend + seasonality)
:parameter
:param s: num - number of observations per season (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
'''
def decompose_ts(ts, s=250, figsize=(20,13)):
decomposition = smt.seasonal_decompose(ts, freq=s)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
fig, ax = plt.subplots(nrows=4, ncols=1, sharex=True, sharey=False, figsize=figsize)
ax[0].plot(ts)
ax[0].set_title('Original')
ax[0].grid(True)
ax[1].plot(trend)
ax[1].set_title('Trend')
ax[1].grid(True)
ax[2].plot(seasonal)
ax[2].set_title('Seasonality')
ax[2].grid(True)
ax[3].plot(residual)
ax[3].set_title('Residuals')
ax[3].grid(True)
return {"trend":trend, "seasonal":seasonal, "residual":residual}
'''
Find outliers using sklearn unsupervised support vetcor machine.
:parameter
:param ts: pandas Series
:param perc: float - percentage of outliers to look for
:return
dtf with raw ts, outlier 1/0 (yes/no), numeric index
'''
def find_outliers(ts, perc=0.01, figsize=(15,5)):
## fit svm
scaler = preprocessing.StandardScaler()
ts_scaled = scaler.fit_transform(ts.values.reshape(-1,1))
model = svm.OneClassSVM(nu=perc, kernel="rbf", gamma=0.01)
model.fit(ts_scaled)
## dtf output
dtf_outliers = ts.to_frame(name="ts")
dtf_outliers["index"] = range(len(ts))
dtf_outliers["outlier"] = model.predict(ts_scaled)
dtf_outliers["outlier"] = dtf_outliers["outlier"].apply(lambda x: 1 if x==-1 else 0)
## plot
fig, ax = plt.subplots(figsize=figsize)
ax.set(title="Outliers detection: found "+str(sum(dtf_outliers["outlier"]==1)))
ax.plot(dtf_outliers["index"], dtf_outliers["ts"], color="black")
ax.scatter(x=dtf_outliers[dtf_outliers["outlier"]==1]["index"], y=dtf_outliers[dtf_outliers["outlier"]==1]['ts'], color='red')
ax.grid(True)
plt.show()
return dtf_outliers
'''
Interpolate outliers in a ts.
'''
def remove_outliers(ts, outliers_idx, figsize=(15,5)):
ts_clean = ts.copy()
ts_clean.loc[outliers_idx] = np.nan
ts_clean = ts_clean.interpolate(method="linear")
ax = ts.plot(figsize=figsize, color="red", alpha=0.5, title="Remove outliers", label="original", legend=True)
ts_clean.plot(ax=ax, grid=True, color="black", label="interpolated", legend=True)
plt.show()
return ts_clean
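# Hedged usage sketch (illustrative): chain the two helpers above to flag roughly 1% of
# points as outliers and interpolate over them before modelling; 'ts' is any pandas Series.
def _example_clean_outliers(ts):
    dtf_outliers = find_outliers(ts, perc=0.01)
    outliers_index = dtf_outliers[dtf_outliers["outlier"] == 1].index
    return remove_outliers(ts, outliers_idx=outliers_index)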
###############################################################################
# MODEL DESIGN & TESTING - FORECASTING #
###############################################################################
'''
Split train/test from any given data point.
:parameter
:param ts: pandas Series
:param exog: array len(ts) x n regressors
:param test: num or str - test size (ex. 0.20) or index position (ex. "yyyy-mm-dd", 1000)
:return
ts_train, ts_test, exog_train, exog_test
'''
def split_train_test(ts, exog=None, test=0.20, plot=True, figsize=(15,5)):
## define splitting point
if type(test) is float:
split = int(len(ts)*(1-test))
perc = test
elif type(test) is str:
split = ts.reset_index()[ts.reset_index().iloc[:,0]==test].index[0]
perc = round(len(ts[split:])/len(ts), 2)
else:
split = test
perc = round(len(ts[split:])/len(ts), 2)
print("--- splitting at index: ", split, "|", ts.index[split], "| test size:", perc, " ---")
## split ts
ts_train = ts.head(split)
ts_test = ts.tail(len(ts)-split)
if plot is True:
fig, ax = plt.subplots(nrows=1, ncols=2, sharex=False, sharey=True, figsize=figsize)
ts_train.plot(ax=ax[0], grid=True, title="Train", color="black")
ts_test.plot(ax=ax[1], grid=True, title="Test", color="black")
ax[0].set(xlabel=None)
ax[1].set(xlabel=None)
plt.show()
## split exog
if exog is not None:
exog_train = exog[0:split]
exog_test = exog[split:]
return ts_train, ts_test, exog_train, exog_test
else:
return ts_train, ts_test
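# Hedged usage sketch (illustrative): the test argument can be a fraction, an index label,
# or an integer position; only the fraction form is exercised here.
def _example_split_train_test():
    ts = pd.Series(range(100), index=pd.date_range("2020-01-01", periods=100, freq="D"))
    ts_train, ts_test = split_train_test(ts, test=0.20, plot=False)   # last 20 points as test
    return ts_train, ts_test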
'''
Compute the confidence interval for predictions:
[y[t+h] +- (c*σ*√h)]
:parameter
:param lst_values: list or array
:param error_std: σ (standard dev of residuals)
:param conf: num - confidence level (90%, 95%, 99%)
:return
array with 2 columns (upper and lower bounds)
'''
def utils_conf_int(lst_values, error_std, conf=0.95):
lst_values = list(lst_values) if type(lst_values) != list else lst_values
c = round( stats.norm.ppf(1-(1-conf)/2), 2)
lst_ci = []
for x in lst_values:
lst_x = lst_values[:lst_values.index(x)+1]
h = len(lst_x)
ci = [x - (c*error_std*np.sqrt(h)), x + (c*error_std*np.sqrt(h))]
lst_ci.append(ci)
return np.array(lst_ci)
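# Hedged worked example (illustrative numbers): with conf=0.95 the multiplier is c=1.96,
# so the third forecast step (h=3) below gets 104 +/- 1.96*5*sqrt(3), i.e. roughly [87.0, 121.0].
def _example_conf_int():
    return utils_conf_int([100, 102, 104], error_std=5, conf=0.95)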
'''
Evaluation metrics for predictions.
:parameter
:param dtf: DataFrame with columns "ts", "model", "forecast", and "lower"/"upper" (if available)
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper", "error"
'''
def utils_evaluate_ts_model(dtf, conf=0.95, title=None, plot=True, figsize=(20,13)):
try:
## residuals from fitting
### add column
dtf["residuals"] = dtf["ts"] - dtf["model"]
### kpi
residuals_mean = dtf["residuals"].mean()
residuals_std = dtf["residuals"].std()
## forecasting error
### add column
dtf["error"] = dtf["ts"] - dtf["forecast"]
dtf["error_pct"] = dtf["error"] / dtf["ts"]
### kpi
error_mean = dtf["error"].mean()
error_std = dtf["error"].std()
mae = dtf["error"].apply(lambda x: np.abs(x)).mean() #mean absolute error
mape = dtf["error_pct"].apply(lambda x: np.abs(x)).mean() #mean absolute error %
mse = dtf["error"].apply(lambda x: x**2).mean() #mean squared error
rmse = np.sqrt(mse) #root mean squared error
## interval
if "upper" not in dtf.columns:
print("--- computing confidence interval ---")
dtf["lower"], dtf["upper"] = [np.nan, np.nan]
dtf.loc[dtf["forecast"].notnull(), ["lower","upper"]] = utils_conf_int(
dtf[dtf["forecast"].notnull()]["forecast"], residuals_std, conf)
## plot
if plot is True:
fig = plt.figure(figsize=figsize)
fig.suptitle(title, fontsize=20)
ax1 = fig.add_subplot(2,2, 1)
ax2 = fig.add_subplot(2,2, 2, sharey=ax1)
ax3 = fig.add_subplot(2,2, 3)
ax4 = fig.add_subplot(2,2, 4)
### training
dtf[pd.notnull(dtf["model"])][["ts","model"]].plot(color=["black","green"], title="Model", grid=True, ax=ax1)
ax1.set(xlabel=None)
### test
dtf[pd.isnull(dtf["model"])][["ts","forecast"]].plot(color=["black","red"], title="Forecast", grid=True, ax=ax2)
ax2.fill_between(x=dtf.index, y1=dtf['lower'], y2=dtf['upper'], color='b', alpha=0.2)
ax2.set(xlabel=None)
### residuals
dtf[["residuals","error"]].plot(ax=ax3, color=["green","red"], title="Residuals", grid=True)
ax3.set(xlabel=None)
### residuals distribution
dtf[["residuals","error"]].plot(ax=ax4, color=["green","red"], kind='kde', title="Residuals Distribution", grid=True)
ax4.set(ylabel=None)
plt.show()
print("Training --> Residuals mean:", np.round(residuals_mean), " | std:", np.round(residuals_std))
print("Test --> Error mean:", np.round(error_mean), " | std:", np.round(error_std),
" | mae:",np.round(mae), " | mape:",np.round(mape*100), "% | mse:",np.round(mse), " | rmse:",np.round(rmse))
return dtf[["ts", "model", "residuals", "lower", "forecast", "upper", "error"]]
except Exception as e:
print("--- got error ---")
print(e)
'''
Generate dates to index predictions.
:parameter
:param start: str - "yyyy-mm-dd"
:param end: str - "yyyy-mm-dd"
:param n: num - length of index
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
'''
def utils_generate_indexdate(start, end=None, n=None, freq="D"):
if end is not None:
index = pd.date_range(start=start, end=end, freq=freq)
else:
index = pd.date_range(start=start, periods=n, freq=freq)
index = index[1:]
# print("start ", start)
# print("end ", end)
# print("index --", index)
print("--- generating index date --> start:", index[0], "| end:", index[-1], "| len:", len(index), "---")
return index
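# Illustrative sketch (added, not part of the original module): generate a
# 30-step daily index starting right after the last observed date.
def _example_indexdate():
    idx = utils_generate_indexdate(start="2020-01-31", n=31, freq="D")
    ## the start date itself is dropped (index = index[1:]), so the first
    ## generated date is 2020-02-01 and len(idx) == 30
    return idx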
'''
Plot the forecast of the unknown future and compute the confidence interval from the std of the fitting residuals.
:parameter
:param dtf: DataFrame with columns "ts", "model", "forecast", and "lower"/"upper" (if available)
:param conf: num - confidence level (90%, 95%, 99%)
:param zoom: int - plots the focus on the last zoom days
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper" (No error)
'''
def utils_add_forecast_int(dtf, conf=0.95, plot=True, zoom=30, figsize=(15,5)):
## residuals from fitting
### add column
dtf["residuals"] = dtf["ts"] - dtf["model"]
### kpi
residuals_std = dtf["residuals"].std()
## interval
if "upper" not in dtf.columns:
print("--- computing confidence interval ---")
dtf["lower"], dtf["upper"] = [np.nan, np.nan]
dtf.loc[dtf["forecast"].notnull(), ["lower","upper"]] = utils_conf_int(
dtf[dtf["forecast"].notnull()]["forecast"], residuals_std, conf)
## plot
if plot is True:
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
### entire series
dtf[["ts","forecast"]].plot(color=["black","red"], grid=True, ax=ax[0], title="History + Future")
ax[0].fill_between(x=dtf.index, y1=dtf['lower'], y2=dtf['upper'], color='b', alpha=0.2)
### focus on last
first_idx = dtf[pd.notnull(dtf["forecast"])].index[0]
first_loc = dtf.index.tolist().index(first_idx)
zoom_idx = dtf.index[first_loc-zoom]
dtf.loc[zoom_idx:][["ts","forecast"]].plot(color=["black","red"], grid=True, ax=ax[1], title="Zoom on the last "+str(zoom)+" observations")
ax[1].fill_between(x=dtf.loc[zoom_idx:].index, y1=dtf.loc[zoom_idx:]['lower'], y2=dtf.loc[zoom_idx:]['upper'], color='b', alpha=0.2)
plt.show()
return dtf[["ts", "model", "residuals", "lower", "forecast", "upper"]]
###############################################################################
# RANDOM WALK #
###############################################################################
'''
Generate a Random Walk process.
:parameter
:param y0: num - starting value
    :param n: num - length of process
    :param sigma: num - std of the white-noise step
:param ymin: num - limit
:param ymax: num - limit
'''
def utils_generate_rw(y0, n, sigma, ymin=None, ymax=None):
rw = [y0]
for t in range(1, n):
yt = rw[t-1] + np.random.normal(0,sigma)
if (ymax is not None) and (yt > ymax):
yt = rw[t-1] - abs(np.random.normal(0,sigma))
elif (ymin is not None) and (yt < ymin):
yt = rw[t-1] + abs(np.random.normal(0,sigma))
rw.append(yt)
return rw
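# Illustrative sketch (added, not part of the original module): a bounded
# random walk of 100 steps that starts at 5 and is pushed back toward the
# interval [0, 10] whenever a step would cross a limit.
def _example_generate_rw():
    np.random.seed(0)  ## only to make the sketch reproducible
    rw = utils_generate_rw(y0=5, n=100, sigma=1, ymin=0, ymax=10)
    return rw  ## a plain list of 100 floats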
'''
Simulate Random Walk from params of a given ts:
y[t+1] = y[t] + wn~(0,σ)
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper", "error"
'''
def simulate_rw(ts_train, ts_test, conf=0.95, figsize=(15,10)):
## simulate train
diff_ts = ts_train - ts_train.shift(1)
rw = utils_generate_rw(y0=ts_train[0], n=len(ts_train), sigma=diff_ts.std(), ymin=ts_train.min(), ymax=ts_train.max())
dtf_train = ts_train.to_frame(name="ts").merge(pd.DataFrame(rw, index=ts_train.index, columns=["model"]), how='left', left_index=True, right_index=True)
## test
rw = utils_generate_rw(y0=ts_train[-1], n=len(ts_test), sigma=diff_ts.std(), ymin=ts_train.min(), ymax=ts_train.max())
dtf_test = ts_test.to_frame(name="ts").merge(pd.DataFrame(rw, index=ts_test.index, columns=["forecast"]),
how='left', left_index=True, right_index=True)
## evaluate
dtf = dtf_train.append(dtf_test)
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title="Random Walk Simulation")
return dtf
'''
Forecast unknown future.
:parameter
:param ts: pandas series
:param pred_ahead: number of observations to forecast (ex. pred_ahead=30)
:param end: string - date to forecast (ex. end="2016-12-31")
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
:param zoom: for plotting
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper" (No error)
'''
def forecast_rw(ts, pred_ahead=None, end=None, freq="D", conf=0.95, zoom=30, figsize=(15,5)):
## fit
diff_ts = ts - ts.shift(1)
sigma = diff_ts.std()
rw = utils_generate_rw(y0=ts[0], n=len(ts), sigma=sigma, ymin=ts.min(), ymax=ts.max())
dtf = ts.to_frame(name="ts").merge(pd.DataFrame(rw, index=ts.index, columns=["model"]),
how='left', left_index=True, right_index=True)
## index
index = utils_generate_indexdate(start=ts.index[-1], end=end, n=pred_ahead, freq=freq)
## forecast
preds = utils_generate_rw(y0=ts[-1], n=len(index), sigma=sigma, ymin=ts.min(), ymax=ts.max())
dtf = dtf.append( | pd.DataFrame(data=preds, index=index, columns=["forecast"]) | pandas.DataFrame |
# Copyright (c) 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Builds a table with SMILES and yield information."""
import collections
import dataclasses
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem
def location_to_row_col(location, block, plate):
"""Converts a block location to (row, col) on the plate.
Args:
location: Text location in the block, e.g. "1:A".
block: Integer block number.
plate: Integer plate number.
Returns:
Tuple of (row, col) integers; the location on the plate.
"""
if plate == 3:
row_letter, col = location.split(':')
else:
col, row_letter = location.split(':')
col = int(col)
row = ord(row_letter) - 64
if block == 2:
col += 24
elif block == 3:
row += 16
elif block == 4:
row += 16
col += 24
return row, col
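def _example_location_to_row_col():
    """Illustrative sanity check (added, not part of the original script).
    On plates 1 and 2 the location string is "col:row", so "3:B" in block 4
    maps to row 2+16=18 and col 3+24=27; on plate 3 the order is "row:col".
    """
    assert location_to_row_col('3:B', block=4, plate=1) == (18, 27)
    assert location_to_row_col('B:3', block=4, plate=3) == (18, 27)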
def read_yield_data():
"""Reads location/yield data from the yield_data/ directory.
Returns:
DataFrame with the following columns:
* plate
* row
* col
* yield
"""
data = []
for plate in [1, 2, 3]:
for block in [1, 2, 3, 4]:
filename = f'yield_data/plate{plate}.{block}.csv'
print(filename)
df = | pd.read_csv(filename) | pandas.read_csv |
import pandas as pd
import numpy as np
import time
from .utils import open_wods
class Clean(object):
"""An object to clean (post-process) downloaded CrossFit open data.
"""
def __init__(self, path):
"""Clean Crossfit open data object.
        Parameters
----------
path : string
File path.
Returns
-------
cfopendata : pd.Dataframe
Cleaned Crossfit open data.
Example
-------
cfa.Clean('Data/Men_Rx_2018_raw')
"""
self.path = path
# Open file
self.df = pd.read_pickle(self.path)
# Get year from the file name
self.year = int(str(self.path[-8:-4]))
# Get WOD info
wod_info = open_wods(self.year)
self.wodscompleted = int(wod_info['wodscompleted'].values)
self.predictions = wod_info['predictions'].values
self.totalreps = wod_info['totalreps'].values
self.timecaps = wod_info['timecaps'].values
new_cols = wod_info['dfcheader'].values
self.scorel = wod_info['scorel'].values
# Find the column indexes of the score columns
self.ci = [None] * self.wodscompleted
for j in range(self.wodscompleted):
self.ci[j] = self.df.columns.get_loc(self.scorel[j])
# Check file is in the right order
if int(self.df.loc[0, 'Overall_rank']) != 1:
            raise IOError('File is not in correct order. '
                          'Should be ascending rank')
# Initialize new DataFrame
self.columns = ['User_id', 'Name', 'Height_(m)', 'Weight_(kg)', 'Age',
'Region_id', 'Region_name', 'Affiliate_id',
'Overall_rank', 'Overall_score', 'Overall_percentile']
self.columns.extend(new_cols)
self.cleandata = | pd.DataFrame(columns=self.columns) | pandas.DataFrame |
"""
Name: foneutil
Version: 0.4.4
Info: Python based script to record customer interactions, allowing
the user to capture relevant information from each call. The
script lets the user edit already-entered data in real time.
Requirements: Pandas, pyfiglet and termcolor modules
Created by: <NAME> - <EMAIL>
"""
import datetime
import readline
import os
import sys
import pandas as pd
import numpy as np
from pyfiglet import figlet_format
def clear():
"""
Clear the screen at the start of the script
"""
_ = os.system('clear')
try:
from termcolor import colored
except ImportError:
colored = None
def banner(string, color, font="speed", figlet=False):
"""
Add a color banner on the top of the menu when loading script
"""
if colored:
if not figlet:
print(colored(string, color))
else:
print(colored(figlet_format(string, font=font), color))
else:
print(string)
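# Example usage (illustrative, added): banner("foneutil", color="green", figlet=True)
# prints the title as ASCII art when termcolor is available and falls back to
# plain text otherwise.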
def rlinput(prompt, prefill=''):
"""
Function to allow the user to go back and edit the existing variable
data when entering call info. This in effect allows the form to act as
an interactive utility.
"""
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return input(prompt)
finally:
readline.set_startup_hook()
def display_file_content(filename):
"""
This function reads in the data file and displays the rows along with a
    numerical index value. The pd.set_option() is there to stop the displayed
rows being truncated.
"""
clear()
df_csv = pd.read_csv(filename)
df_csv = df_csv.replace(np.nan, '', regex=True)
pd.set_option('display.max_rows', None)
print(df_csv)
def get_record(var):
"""
Pull the row related to the index ID that is provided by the user when
the read records area.
"""
df_csv = pd.read_csv(filename)
index_id = int(var)
display_record = df_csv.iloc[index_id]
pd.set_option('display.max_colwidth', 2000)
format_row(display_record)
def format_row(row_number):
"""
Display row data in a similar setup to the data_entry() function when pulling
the data from the file.
"""
print(f'\nDate: {row_number["date"]}')
print(f'Customer name: {row_number["name"]}')
print(f'Domain: {row_number["domain"]}')
print(f'Install: {row_number["install"]}')
print(f'Pin: {row_number["pin"]}')
print(f'Conundrum: {row_number["conundrum"]}\n')
def display_record(filename):
"""
Retrieve data from data.csv. Currently looking at improving the display
of this data and to create a more interactive menu/search function
"""
display_file_content(filename)
while True:
choice = input("\nEnter index ID to look up record or type [E]xit to return to Main Menu: ")
if choice.isdigit():
try:
get_record(choice)
except IndexError:
print("Could not find requested index.")
elif choice.lower() in ['e', 'exit']:
mainMenuHeader()
break
else:
mainMenuHeader()
banner("\nIncorrect input provided.", color="yellow")
break
pd.reset_option('display.max_colwidth')
def remove_record(filename):
"""
This deletes a specific row in the data.csv file that is defined by way
of user input.
"""
display_file_content(filename)
while True:
try:
record = input("\nProvide the record index to delete or type [E]xit to return to Main Menu: ")
if record.lower() in ['e', 'exit']:
mainMenuHeader()
break
else:
index_id = int(record)
try:
df_csv = pd.read_csv(filename)
df_csv = df_csv.drop(index=[index_id])
df_csv.to_csv(filename, index=False)
mainMenuHeader()
banner("\nNotice:", color="yellow")
banner("Record {} has been deleted. Returning to main menu.".format(index_id), color="yellow")
break
except FileNotFoundError:
print("Unable to find file. Make sure data.csv exists.")
except IOError:
print("Unable to open file.")
except IndexError:
print("Data doesn't exist.")
except ValueError:
print("Incorrect value")
def data_entry(filename):
"""
Obtain data from the user to save. The user will be able to go back and
edit the data already entered and it will be saved once user goes back
to the main menu.
"""
var_name = ""
var_domain = ""
var_install = ""
var_pin = ""
var_conundrum = ""
date_now = datetime.datetime.now() # Set time for when script runs
while True:
try:
clear()
date_formatted = date_now.strftime("%x" + " " + "%X")
print(date_formatted)
print("Name: " + var_name)
print("Domain: " + var_domain)
print("Install: " + var_install)
print("Pin: " + var_pin)
print("Conundrum: " + var_conundrum)
print("\n[S]ave \t [E]xit\n")
choice = str(input("Which option do you want to edit? "))
readline.set_pre_input_hook(None)
if choice.lower() in ['n', 'name']:
var_name = rlinput("Enter name: ", var_name)
elif choice.lower() in ['d', 'domain', 'url']:
var_domain = rlinput("Enter domain: ", var_domain)
elif choice.lower() in ['i', 'install', 'site']:
var_install = rlinput("Enter install: ", var_install)
elif choice.lower() in ['p', 'pin']:
var_pin = rlinput("Enter pin: ", var_pin)
elif choice.lower() in ['c', 'notes', 'conundrum']:
var_conundrum = rlinput("Enter conundrum: ", var_conundrum)
elif choice.lower() in ['s', 'save']:
menu_options = input("This will save and close this form. Continue? [y/n] ")
if menu_options.lower() in ['y', 'yes']:
notes = {
'date': [date_formatted],
'name': [var_name],
'domain': [var_domain],
'install': [var_install],
'pin': [var_pin],
'conundrum': [var_conundrum]
}
df = pd.DataFrame(data=notes)
try:
df.to_csv(filename, mode='a', header=False, index=False)
mainMenuHeader()
break
except IOError as e:
print("I/O error: {0}".format(e))
sys.exit(1)
elif menu_options.lower() in ['n', 'no']:
continue
elif choice.lower() in ['e', 'exit', 'q', 'return']:
menu_options = input("Do you wish to exit the form without saving? [y/n] ")
if menu_options.lower() in ['y', 'yes']:
mainMenuHeader()
break
elif menu_options.lower() in ['n', 'no']:
continue
else:
print("invalid INPUT!!")
continue
except ValueError:
print("Invalid input.")
def update_record(filename):
display_file_content(filename)
record = input("\nPlease provide the index ID to review: ")
df_csv = | pd.read_csv(filename) | pandas.read_csv |
import numpy as np
import pandas as pd
import random
import tensorflow.keras as keras
from sklearn.model_selection import train_test_split
def read_data(random_state=42,
otu_filename='../../Datasets/otu_table_all_80.csv',
metadata_filename='../../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
data_domain = df[domain.columns].to_numpy(dtype=np.float32)
data_microbioma_train, data_microbioma_test, data_domain_train, data_domain_test = \
train_test_split(data_microbioma, data_domain, test_size=0.1, random_state=random_state)
return data_microbioma_train, data_microbioma_test, data_domain_train, data_domain_test, otu.columns, domain.columns
def read_df_with_transfer_learning_subset_fewerDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
# Transfer learning subset
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_subset(random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_subset_stratified_by_maize_line(random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
df_temp=df_domain_transfer_learning
col_stratify=df_temp.iloc[:,30:36][df==1].stack().reset_index().loc[:,'level_1']
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state, stratify = col_stratify)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_2otufiles_fewerDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv',
otu_transfer_filename='../Datasets/Walters5yearsLater/otu_table_Walters5yearsLater.csv',
metadata_transfer_filename='../Datasets/Walters5yearsLater/metadata_table_Walters5yearsLater.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, _, df_domain_test, _ = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
otu_columns = otu.columns
domain_columns = domain.columns
# TRANSFER LEARNING SUBSETS
otu = pd.read_csv(otu_transfer_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_transfer_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma, df_domain, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu_columns, domain_columns
def read_df_with_transfer_learning_2otufiles_differentDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv',
metadata_names_transfer=['pH', 'Nmin', 'N', 'C', 'C.N', 'Corg', 'soil_type', 'clay_fration', 'water_holding_capacity'],
otu_transfer_filename='../Datasets/Maarastawi2018/otu_table_Order_Maarastawi2018.csv',
metadata_transfer_filename='../Datasets/Maarastawi2018/metadata_table_Maarastawi2018.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = | pd.concat([otu, domain], axis=1, sort=True, join='outer') | pandas.concat |
import sys
import requests
import numpy as np
import pandas as pd
import kauffman.constants as c
| pd.set_option('max_columns', 1000) | pandas.set_option |
#%%
"""
Analyze model:
Meant to analyze each model and its performance
"""
import h5py
import matplotlib.pyplot as plt
import seaborn
import numpy as np
import pandas as pd
import os
from keras.models import load_model
path = os.path.abspath(os.curdir)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
#%%
df = pd.read_csv('{}/csv-data/movie-data.csv'.format(path))
df.drop(columns='Unnamed: 0', inplace=True)
# load the per-genre binary classifiers referenced by the `models` dict below
action_model = load_model('{}/model_version/n_most/action_model.h5'.format(path))
adventure_model = load_model('{}/model_version/n_most/adventure_model.h5'.format(path))
comedy_model = load_model('{}/model_version/n_most/comedy_model.h5'.format(path))
crime_model = load_model('{}/model_version/n_most/crime_model.h5'.format(path))
family_model = load_model('{}/model_version/n_most/family_model.h5'.format(path))
mystery_model = load_model('{}/model_version/n_most/mystery_model.h5'.format(path))
romance_model = load_model('{}/model_version/n_most/romance_model.h5'.format(path))
thriller_model = load_model('{}/model_version/n_most/thriller_model.h5'.format(path))
#%%
print(df.head())
#%%
features = df['plot'].values
n_most_common_words = 10000
max_len = 500
tokenizer = Tokenizer(num_words=n_most_common_words, lower=True)
tokenizer.fit_on_texts(features)
sequences = tokenizer.texts_to_sequences(features)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
# print(word_index)
X = pad_sequences(sequences, maxlen=500)
# print(X)
#%%
models = {"Action": action_model,
"Adventure": adventure_model,
'Comedy': comedy_model,
"Crime": crime_model,
"Family": family_model,
"Mystery": mystery_model,
"Romance": romance_model,
"Thriller": thriller_model}
for genre, model in models.items():
y = df[genre]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)
prediction = model.predict(X_test)
cf_m = confusion_matrix(y_test.values.tolist(), np.rint(prediction.flatten()).tolist())
plt.title('Actual')
seaborn.set(font_scale=1.1) # for label size
rep = pd.DataFrame(cf_m, index=['N {}'.format(genre), genre],
columns=['N {}'.format(genre), genre])
sb = seaborn.heatmap(rep, annot=True, fmt='g').xaxis.set_ticks_position('top')
plt.ylabel('Predicted')
plt.xlabel('Bidirectional LSTM')
plt.show()
#%%
print(path)
df_co = pd.read_csv('{}/csv-data/movie-data-cleaned.csv'.format(path))
df_co.drop(['Unnamed: 0'], axis=1, inplace=True)
df_im = pd.read_csv('{}/csv-data/movies_genres.csv'.format(path), delimiter='\t')
df_im.head()
imdb_genres = df_im.drop(['plot', 'title', 'Sci-Fi','Documentary', 'Reality-TV', 'Animation'], axis=1)
counts = []
categories = list(imdb_genres.columns.values)
for i in categories:
counts.append((i, imdb_genres[i].sum()))
df_stats_imdb = pd.DataFrame(counts, columns=['genre', '#movies'])
df_stats_imdb = df_stats_imdb[df_stats_imdb['#movies'] > 8000]
df_stats_imdb
# df_stats_imdb['genre'].values
df_co.head()
corpus_genres = df_co.drop(['Title', 'Summary', 'Horror'], axis=1)
counts = []
categories = list(corpus_genres.columns.values)
for i in categories:
counts.append((i, corpus_genres[i].sum()))
df_stats_corpus = pd.DataFrame(counts, columns=['genre', '#movies'])
df_stats_corpus
cs = []
for index, category in enumerate(df_stats_imdb['genre']):
current_index_b = 0
for index_b, category_b in enumerate(df_stats_corpus['genre']):
if category == category_b:
current_index_b = index_b
cs.append((category, df_stats_corpus['#movies'].values[index_b] + df_stats_imdb['#movies'].values[index]))
if not (category, df_stats_corpus['#movies'].values[current_index_b] + df_stats_imdb['#movies'].values[index]) in cs:
cs.append((category, df_stats_imdb['#movies'].values[index]))
df_stats = | pd.DataFrame(cs, columns=['genre', '#movies']) | pandas.DataFrame |
import json
import pickle
import os
import random
from pathlib import Path
from typing import List
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
import fire
import torch
from ignite.engine.engine import Engine
from ignite.contrib.handlers import ProgressBar
sys.path.append(os.getcwd())
import utils.train_util as train_util
# from datasets.caption_dataset import CaptionDataset, CaptionEvalDataset, CaptionSampler, collate_fn
import datasets.caption_dataset as ac_dataset
class BaseRunner(object):
"""Main class to run experiments"""
def __init__(self, seed=1):
super(BaseRunner, self).__init__()
self.seed = seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
torch.cuda.manual_seed_all(seed)
# torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
self.device = torch.device(device)
def _get_dataloaders(self, config, vocabulary):
augments = train_util.parse_augments(config["augments"])
if config["distributed"]:
config["dataloader_args"]["batch_size"] //= self.world_size
if "caption_file" in config:
h5file_df = pd.read_csv(config["h5_csv"], sep="\t")
h5file_dict = dict(zip(h5file_df["audio_id"], h5file_df["hdf5_path"]))
caption_info = json.load(open(config["caption_file"], "r"))["audios"]
val_size = int(len(caption_info) * (1 - config["train_percent"] / 100.))
val_audio_idxs = np.random.choice(len(caption_info), val_size, replace=False)
train_audio_idxs = [idx for idx in range(len(caption_info)) if idx not in val_audio_idxs]
train_dataset = ac_dataset.CaptionDataset(
h5file_dict=h5file_dict,
caption_info=caption_info,
vocabulary=vocabulary,
transform=augments
)
# TODO DistributedCaptionSampler
# train_sampler = torch.utils.data.DistributedSampler(train_dataset) if config["distributed"] else None
train_sampler = ac_dataset.CaptionSampler(train_dataset, train_audio_idxs, True)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
collate_fn=ac_dataset.collate_fn([0, 1], 1),
sampler=train_sampler,
**config["dataloader_args"]
)
val_audio_ids = [caption_info[audio_idx]["audio_id"] for audio_idx in val_audio_idxs]
val_dataset = ac_dataset.CaptionEvalDataset(
h5file_dict={audio_id: h5file_dict[audio_id] for audio_id in val_audio_ids}
)
val_dataloader = torch.utils.data.DataLoader(
val_dataset,
collate_fn=ac_dataset.collate_fn([1]),
**config["dataloader_args"]
)
train_key2refs = {}
for audio_idx in train_audio_idxs:
audio_id = caption_info[audio_idx]["audio_id"]
train_key2refs[audio_id] = []
for caption in caption_info[audio_idx]["captions"]:
train_key2refs[audio_id].append(caption["token" if config["zh"] else "caption"])
val_key2refs = {}
for audio_idx in val_audio_idxs:
audio_id = caption_info[audio_idx]["audio_id"]
val_key2refs[audio_id] = []
for caption in caption_info[audio_idx]["captions"]:
val_key2refs[audio_id].append(caption["token" if config["zh"] else "caption"])
else:
train_h5file_df = pd.read_csv(config["train_h5_csv"], sep="\t")
train_h5file_dict = dict(zip(train_h5file_df["audio_id"], train_h5file_df["hdf5_path"]))
train_caption_info = json.load(open(config["train_caption_file"], "r"))["audios"]
val_h5file_df = | pd.read_csv(config["val_h5_csv"], sep="\t") | pandas.read_csv |
"""
Module to generate an interactive app to visualize and train a QoE predictive model
using BRISQUE as input
It relies of Streamlite library for the visualization and display of widgets
"""
import os
import numpy as np
import pandas as pd
import streamlit as st
import random
import plotly.graph_objects as go
from catboost import Pool, CatBoostRegressor
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import mean_absolute_error
st.title('QoE model predictor')
DATA_URI_BRISQUE = '../../cloud_functions/data-brisque-large.csv'
DATA_URI_QOE = '../../cloud_functions/data-qoe-metrics-large.csv'
def mean_absolute_percentage_error(y_true, y_pred):
"""
Computes the MAPE between two vectors of values
"""
return np.mean(np.abs((y_true - y_pred) / y_true))
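# Note (added for clarity): the value returned above is a fraction, not a
# percentage, e.g. mean_absolute_percentage_error(np.array([100.]), np.array([90.]))
# returns 0.1 rather than 10.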
def compute_brisque_features_aggregators(row):
"""
Aggregates the 36 features of brisque for each frame
of a random sequence into mean and std of both the sequence
and its derivative
"""
features = []
sampling = random.choices(row.index, k=4)
for frame in sampling:
if row[frame].shape[0] == 36:
features.append(row[frame])
row['mean'] = np.mean(features, axis=0)
row['std'] = np.std(features, axis=0)
row['mean_dx'] = np.mean(np.diff(features, axis=0), axis=0)
row['std_dx'] = np.std(np.diff(features, axis=0), axis=0)
return row
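# Illustrative sketch (added, not part of the original app): build a fake row
# with six frames of 36 random BRISQUE features each and aggregate it.
def _example_brisque_aggregation():
    frames = {'frame_{}'.format(i): np.random.rand(36) for i in range(6)}
    row = pd.Series(frames)
    row = compute_brisque_features_aggregators(row)
    ## row now also carries 36-dimensional 'mean', 'std', 'mean_dx', 'std_dx'
    return row[['mean', 'std', 'mean_dx', 'std_dx']]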
@st.cache
def load_data(data_uri, nrows):
"""
Function to retrieve data from a given file or URL
in a Pandas DataFrame suitable for model training.
nrows limits the amount of data displayed for optimization
"""
data_df = pd.read_csv(data_uri, nrows=nrows)
lowercase = lambda x: str(x).lower()
data_df.rename(lowercase, axis='columns', inplace=True)
if 'unnamed: 0' in data_df.columns:
data_df.drop('unnamed: 0', axis='columns', inplace=True)
if 'kind' in data_df.columns:
data_df.drop('kind', axis='columns', inplace=True)
if 'qoe' in data_uri:
data_df.rename(columns={'attack':'rendition', 'title':'source'}, inplace=True)
data_df['rendition'] = data_df['rendition'].apply(lambda x: set_rendition_name(x))
data_df['dimension_y'] = data_df['rendition'].apply(lambda x: int(x.split('_')[0]))
data_df['crf'] = data_df['rendition'].apply(lambda x: x.split('_')[-1])
data_df['source'] = data_df['source'].apply(lambda x: x.split('/')[-1])
else:
names = data_df['source']
data_df = data_df.drop('source', axis=1)
sorted_columns = str(sorted([int(i) for i in data_df.columns])).replace('[', '').replace(']', '').split(', ')
data_df = data_df.reindex(columns=sorted_columns)
for column in data_df.columns:
data_df[column] = data_df[column].astype(str).apply(lambda x: np.fromstring(x.replace('[', '').replace(']', ''),
dtype=np.float, sep=' '))
data_df = data_df.apply(lambda row: compute_brisque_features_aggregators(row), axis=1)
data_df['source'] = names
return data_df
def set_rendition_name(rendition_name):
"""
    Function to extract the rendition name from a rendition path
"""
return os.path.dirname(rendition_name).replace('/vimeo', '').split('/')[-1]
def plot_rd_curves(df_qoe):
"""
    Display rate-distortion curves for a selected asset, plotting any two
    chosen metrics against each other, grouped by rendition vertical dimension
"""
metrics = list(df_qoe.columns)
asset = st.selectbox('Which asset to represent?', list(df_qoe['source'].unique()))
metric_x = st.selectbox('Which metric to represent for X?', metrics, index=metrics.index('crf'))
metric_y = st.selectbox('Which metric to represent for Y?', metrics, index=metrics.index('temporal_ssim-mean'))
rate_distortion_df = df_qoe[[metric_x, metric_y, 'pixels', 'dimension_y', 'rendition']][df_qoe['source'] == asset]
data = []
dimensions = rate_distortion_df['dimension_y'].unique()
dimensions.sort()
for dimension in dimensions:
trace = go.Scatter(x=rate_distortion_df[rate_distortion_df['dimension_y'] == dimension][metric_x],
y=rate_distortion_df[rate_distortion_df['dimension_y'] == dimension][metric_y],
mode='markers',
marker=dict(color=dimension,
opacity=0.8,
line=dict(width=0)
),
hovertext=rate_distortion_df['rendition'],
name=str(dimension),
)
data.append(trace)
fig = go.Figure(data=data)
fig.update_layout(title="{} vs {}".format(metric_x, metric_y),
yaxis_title=metric_y,
xaxis_title=metric_x
)
fig.update_layout(legend=go.layout.Legend(x=0,
y=1,
traceorder="normal",
font=dict(family="sans-serif",
size=12,
color="black"
),
bgcolor="LightSteelBlue",
bordercolor="Black",
borderwidth=2
)
)
st.plotly_chart(fig)
def train_features(models_dict, pools_dict, features, x_train, x_test, y_train, y_test):
"""
    Train one CatBoost regressor per target feature and collect the fitted models and their evaluation pools
"""
learn_mape_train_df = pd.DataFrame()
learn_rmse_train_df = pd.DataFrame()
learn_mape_test_df = pd.DataFrame()
learn_rmse_test_df = pd.DataFrame()
categorical_features_indices = []
for feature in features:
y_train = pd.DataFrame(data=y_train, columns=features)
y_test = pd.DataFrame(data=y_test, columns=features)
train_pool = Pool(data=x_train,
label=y_train[feature],
cat_features=categorical_features_indices)
num_trees = 500
loss_funct = 'MAPE'
depth = 1
l2_leaf_reg = 0.2
learning_rate = 0.005
if 'ssim' in feature:
loss_funct = 'MAE'
depth = 1
num_trees = 500
learning_rate = 0.05
l2_leaf_reg = 0.2
models_dict[feature] = CatBoostRegressor(depth=depth,
num_trees=num_trees,
l2_leaf_reg=l2_leaf_reg,
learning_rate=learning_rate,
loss_function=loss_funct
)
#train the model
print('Training QoE model:', feature)
models_dict[feature].fit(train_pool)
pools_dict[feature] = Pool(data=x_test,
label=y_test[feature],
cat_features=categorical_features_indices)
learn_mape_train_df[feature] = models_dict[feature].eval_metrics(train_pool, ['MAPE'])['MAPE']
learn_mape_test_df[feature] = models_dict[feature].eval_metrics(pools_dict[feature], ['MAPE'])['MAPE']
learn_rmse_train_df[feature] = models_dict[feature].eval_metrics(train_pool, ['RMSE'])['RMSE']
learn_rmse_test_df[feature] = models_dict[feature].eval_metrics(pools_dict[feature], ['RMSE'])['RMSE']
st.write('QoE model test set MAPE:')
st.write(learn_mape_test_df.min())
st.write(learn_mape_test_df.min().describe())
return models_dict, pools_dict
@st.cache(suppress_st_warning=True)
def predict_qoe(data_df):
"""
Function to train model from given dataset
"""
num_train = int(data_df.shape[0] * 0.8)
train_data = data_df.sample(num_train)
st.write('Train:', train_data.shape)
test_data = data_df[~data_df.index.isin(train_data.index)]
st.write('Test:', test_data.shape)
x_features = [feature for feature in data_df.columns if 'input' in feature]
x_features.append('dimension_y')
x_features.append('pixels')
x_features.append('crf')
x_features.append('288_45_ssim')
x_features.append('288_45_size')
x_features.append('288_45_pixels')
ssim_features = [feature for feature in data_df.columns if 'ssim' in feature]
bitrate_features = [feature for feature in data_df.columns if 'size' in feature]
ssim_features.remove('288_45_ssim')
ssim_features.sort()
bitrate_features.remove('288_45_size')
bitrate_features.sort()
st.write(train_data[x_features].head(), 'Train')
st.write(train_data[ssim_features].head(), 'Test SSIM')
st.write(train_data[bitrate_features].head(), 'Test Bitrate')
x_train = np.asarray(train_data[x_features])
x_test = np.asarray(test_data[x_features])
# Scale the data
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
bitrate_scaler = MinMaxScaler(feature_range=(0, 1))
ssim_train = train_data[ssim_features].values
bitrate_train = bitrate_scaler.fit_transform(train_data[bitrate_features].values)
ssim_test = test_data[ssim_features].values
bitrate_test = bitrate_scaler.transform(test_data[bitrate_features].values)
models_dict = dict()
pools_dict = dict()
x_train = pd.DataFrame(x_train, columns=x_features, index=train_data.index)
x_test = pd.DataFrame(x_test, columns=x_features, index=test_data.index)
st.write('XTEST SHAPE:', x_test.shape)
models_dict, pools_dict = train_features(models_dict,
pools_dict,
ssim_features,
x_train,
x_test,
ssim_train,
ssim_test)
models_dict, pools_dict = train_features(models_dict,
pools_dict,
bitrate_features,
x_train,
x_test,
bitrate_train,
bitrate_test)
pred_plot_ssim_df = pd.DataFrame(index=test_data.index)
pred_plot_bitrate_df = pd.DataFrame(index=test_data.index)
true_plot_ssim_df = pd.DataFrame(index=test_data.index)
true_plot_bitrate_df = | pd.DataFrame(index=test_data.index) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: olivergiesecke
1) Collect the data on the speakers and text for each alternative.
2) Do the regular pre-processing for each text entry.
3) Apply standard LDA
4) Provide summary statistics on how the probability mass lines up with the different alternatives.
5) Check alignment with the voting record.
"""
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
from gensim.utils import simple_preprocess
import itertools
import os
import gensim
from gensim import corpora, models
from nltk.stem.porter import PorterStemmer
from sklearn.decomposition import TruncatedSVD
import matplotlib
import matplotlib.pyplot as plt
import re
import seaborn as sns
import create_lda_data
import provide_helperfunctions
from nltk.util import ngrams
from collections import Counter
from pprint import pprint
from gensim.models.coherencemodel import CoherenceModel
from mpl_toolkits.mplot3d import Axes3D
pd.set_option('mode.chained_assignment', None)
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
# Set random state for entire file
rnd_state=5
###############################################################################
### Import data ###
data = create_lda_data.main()
data.rename(columns={"start_date":"date"},inplace=True)
data.to_csv("../output/lda_dataset.csv",index=False)
### Data Selection ###
data['new']=1
df_balt=data[data['d_alt']==1].pivot_table(index="date",values='new',aggfunc=np.sum).reset_index()
df_summary = data.pivot_table(index="date",values='new',columns=['d_alt','votingmember'],aggfunc=np.sum)
# Keep only dates for which alternatives are available and speakers who are votingmembers
data_speakers=data[data['votingmember']==1].merge(df_balt,on='date',how='inner')
data_alternatives=data[data['d_alt']==1]
data_alternatives = data_alternatives[data_alternatives['content']!='[]'].copy()
#data_alternatives.to_csv("~/Desktop/alternativetext.csv")
### Check the coverage of the speaker data ###
alt_dates = pd.DataFrame(data[data['d_alt']==1].date.unique()).rename(columns={0:"date"})
alt_dates['alt']=1
date_speakers = pd.DataFrame(data[data['votingmember']==1].date.unique()).rename(columns={0:"date"})
date_speakers['speaker']=1
merge_df = pd.merge(alt_dates,date_speakers,on="date",how="outer")
print("Number of alternative dates: %d" % len(data_alternatives['date'].unique()))
print(f"Earliest meeting with alternatives: {data_alternatives['date'].min()}" )
print(f"Latest meeting with alternatives: {data_alternatives['date'].max()}" )
print("Number of speaker dates: %d" % len(data_speakers['date'].unique()))
print("Earliest date of speaker: %s" % data_speakers['date'].min())
print("Latest date of speaker: %s" % data_speakers['date'].max())
print("Number of words for the speakers is: {:.3f} million".format(len(" ".join(data_speakers['content'].tolist())) / 1e6))
print("Number of words for the alternatives is: {:.3f} million".format(len(" ".join(data_alternatives['content'].tolist())) / 1e6 ))
### Summary Statistics ###
with open("../output/file_basic_sumstats.tex","w") as file:
file.write("DOCUMENTS COLLECTED:\\\\\\\\")
file.write(f"Number of alternative dates: \t \t {len(data_alternatives['date'].unique())}\\\\")
file.write(f"Earliest meeting with alternatives:\t \t {data_alternatives['date'].min()} \\\\")
file.write(f"Latest meeting with alternatives:\t \t {data_alternatives['date'].max()} \\\\ \\\\" )
file.write(f"Number of speaker dates: {len(data_speakers['date'].unique())}\\\\")
file.write(f"Earliest date of speaker: {data_speakers['date'].min()}\\\\")
file.write(f"Latest date of speaker: {data_speakers['date'].max()}\\\\\\\\")
file.write("Number of words for the speakers is: {:.3f} million \\\\".format(len(" ".join(data_speakers['content'].tolist())) / 1e6))
file.write("Number of words for the alternatives is: {:.3f} million \\".format(len(" ".join(data_alternatives['content'].tolist())) / 1e6 ))
# =============================================================================
# # Subsample the speakers -- only to learn the model
# data_speakers_subsample = data_speakers.sample(frac =.1 ,random_state=5)
# print("Number of words for the subsample of speakers is: %s" % (len(" ".join(data_speakers_subsample ['content'].tolist())) / 1e6))
# data_sel = pd.concat([data_speakers_subsample,data_alternatives],axis=0, join='inner')
# data_sel = data_sel.reset_index()
# =============================================================================
### Learn the model based only on basis of the alternatives ###
print("\n### MODEL ESTIMATION - ALTERNATIVES ONLY ###\n")
data_sel = data_alternatives.reset_index()
# Do simple preprocessing
data_sel['parsed']=data_sel['content'].apply(provide_helperfunctions.extract_token)
data_sel['parsed'].loc[1]
### Remove stopwords and do stemming ###
stopwordsnltk = stopwords.words('english')
stopwordsnltk.extend(["mr","chairman","yes",'restrict', 'control','class','page',
'chart','strictli',"presid", "governor", "would","think",
"altern","could","committe","may",
"ty","yt","πt","bt","yt","na","na","gt","row","qiv","rtc","tip","dec","jul",
"confid","interv","ut","seven","confidenti","jun",
"jan","feb","mar","apr","aug","sep","oct","nov",'march','septemb','fr','june','april','nan'])
data_sel['parsed_cleaned']=data_sel['parsed'].apply(lambda x:
provide_helperfunctions.remove_stopwords(
provide_helperfunctions.do_stemming(
provide_helperfunctions.remove_stopwords(x,stopwordsnltk)),stopwordsnltk))
### Build corpus ###
texts=[]
for row_index,row in data_sel.iterrows():
item=row['parsed_cleaned']
texts.append(item)
### Extract tokens ###
tokens =[]
for text in texts:
for word in text:
tokens.append(word)
### Extract the top 100 common tokens ###
counter = Counter(tokens)
n_topwords=100
provide_helperfunctions.plot_wordlist(counter.most_common(n_topwords),n_topwords,n_percolumns=34,filename="../output/tab_tf_alternatives.tex")
### Extract the top 100 bigrams tokens ###
bi_grams = list(ngrams(tokens, 2))
counter = Counter(bi_grams)
n_topwords=100
provide_helperfunctions.plot_wordlist(counter.most_common(n_topwords),n_topwords,n_percolumns=34,filename="../output/tab_tf_bigrams.tex")
### Add bi-grams ###
n_bigrams = 100
bi_gram_mostcommon = ["_".join(ele[0]) for ele in counter.most_common(n_bigrams)]
texts = provide_helperfunctions.add_bigrams(texts,bi_gram_mostcommon)
### Extract the top 100 trigrams tokens ###
tri_grams = list(ngrams(tokens, 3))
counter = Counter(tri_grams)
n_topwords = 68
provide_helperfunctions.plot_wordlist(counter.most_common(n_topwords),n_topwords,n_percolumns=34,filename="../output/tab_tf_trigrams.tex")
### Add tri-grams ###
n_tri_grams = 50
tri_gram_mostcommon = ["_".join(ele[0]) for ele in counter.most_common(n_tri_grams)]
texts = provide_helperfunctions.add_trigrams(texts,tri_gram_mostcommon)
### Plot TF-IDF figure to decide on the terms ###
tokens =[]
for text in texts:
for word in text:
tokens.append(word)
# Unique words
unique_tokens =sorted(list(set(tokens)))
tf_idf = provide_helperfunctions.get_tdidf(tokens,unique_tokens,texts)
tf_idf_sort =np.sort(tf_idf)
tf_idf_invsort = tf_idf_sort[::-1]
plt.figure(figsize=(12,7))
plt.plot(np.arange(len(unique_tokens)),tf_idf_invsort)
plt.ylabel('Tf-idf weight')
plt.xlabel('Rank of terms ordered by tf-idf')
plt.savefig('../output/fig_alt_tfidf.pdf')
# print terms with the largest ranking
def merge(list1, list2):
merged_list = [(list1[i], list2[i]) for i in range(0, len(list1))]
return merged_list
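# Note (added for clarity): merge() above is equivalent to list(zip(list1, list2))
# for equal-length inputs, which is how it is used below.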
n_topwords = 68
indices = tf_idf.argsort()[-n_topwords:][::-1]
tfidf_top = tf_idf[indices]
word_arr = np.asarray(unique_tokens)
word_top= word_arr[indices]
counter = merge(list(word_top),list(tfidf_top))
provide_helperfunctions.plot_wordlist(counter,n_topwords,n_percolumns=34,filename="../output/tab_tfidf_list.tex",columnnames=['#','term','tf-idf score'])
### Keep top x words ###
totaln_words=2200
texts = provide_helperfunctions.trim_texts(tf_idf,unique_tokens,texts,totaln_words)
### Build dictionary ###
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
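# For intuition (added note): doc2bow maps each document to a sparse list of
# (token_id, count) pairs and silently drops tokens that are not in the
# dictionary. For example, a text containing "inflat" twice and "rate" once
# would become [(dictionary.token2id['inflat'], 2), (dictionary.token2id['rate'], 1)],
# assuming both terms survived the tf-idf trim above.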
###############################################################################
### Model Selection ###
run_modelsel = False
run_numtopic = False
if run_modelsel == True:
### Explore multiple dimension of the parameter space - TAKES A LONG TIME ###
alpha_v = np.array([0.001,0.01,0.1,0.5,1])
eta_v = np.array([0.001,0.01,0.1,0.5,1])
topic_v =np.array([5,10,15,20])
models_df = provide_helperfunctions.explore_parameterspace(totaln_words,corpus,dictionary,rnd_state,texts,alpha_v,eta_v,topic_v)
models_df=models_df.sort_values(by='coherence score (PMI)',ascending=False).reset_index().drop(columns="index")
models_df['model']=models_df.index
models_df['model']=models_df['model'].apply(lambda x:x+1)
models_df.to_latex("../output/tab_models.tex",index=False,float_format="%.3f")
# plot the parameter space
#provide_helperfunctions.plot_parameterspace(models_df)
# extract the parameter values for the highest coherence score
row = models_df.loc[models_df['coherence score (PMI)'].idxmax()]
row.to_pickle("../output/opt_parameter_coh")
row = models_df.loc[models_df['perplexity'].idxmax()]
row.to_pickle("../output/opt_parameter_perplexity")
row = pd.read_pickle("../output/opt_parameter_coh")
num_topics = int(row['# topics'])
eta_p = row['eta']
alpha_p = row['alpha']
provide_helperfunctions.output_number(num_topics,filename="../output/par_bs_numtopoics.tex",dec=0)
provide_helperfunctions.output_number(eta_p,filename="../output/par_bs_eta.tex",dec=3)
provide_helperfunctions.output_number(alpha_p,filename="../output/par_bs_alpha.tex",dec=3)
if run_numtopic == True:
### Number of topic evaluations ###
eta = eta_p
alpha = alpha_p
provide_helperfunctions.explore_numberoftopics(totaln_words,corpus,dictionary,texts,rnd_state, eta , alpha )
provide_helperfunctions.output_number(eta,filename="../output/par_topic_eta.tex",dec=3)
provide_helperfunctions.output_number(alpha,filename="../output/par_topic_alpha.tex",dec=3)
###############################################################################
### Model Estimation ###
### Do LDA ###
print(f"# Model parameter: Number of topics = {num_topics}, eta = {eta_p}, alpha = {alpha_p} random state = {rnd_state}\n")
ldamodel = models.ldamodel.LdaModel(corpus, num_topics, id2word = dictionary, passes=30 ,eta=eta_p ,alpha = alpha_p, random_state=rnd_state)
# Perplexity
logperplexity = ldamodel.log_perplexity(corpus, total_docs=None)
provide_helperfunctions.output_number(logperplexity,filename="../output/par_logperplexity.tex",dec=3)
# Coherence measure
cm = CoherenceModel(model=ldamodel, corpus=corpus, texts = texts, coherence='c_uci') # this is the pointwise mutual info measure.
coherence = cm.get_coherence() # get coherence value
provide_helperfunctions.output_number(coherence,filename="../output/par_coherence.tex",dec=3)
### Inspect the topics ###
n_words=10
x=ldamodel.show_topics(num_topics, num_words=n_words,formatted=False)
topics_words = [(tp[0], [wd[0] for wd in tp[1]]) for tp in x]
print("# These are the topic distributions for the estimated model:\n")
for topic,words in topics_words:
print(str(topic)+ "::"+ str(words))
### Visualize as a heatmap ###
provide_helperfunctions.draw_heatmap(x,n_words,params=(num_topics,eta_p,alpha_p), pmin = 0, pmax = 0.05)
###############################################################################
data = pd.concat([data_speakers,data_alternatives],axis=0, join='inner')
data = data.reset_index()
data_light = data[['d_alt','date', 'speaker', 'speaker_id', 'votingmember', 'ambdiss','tighterdiss', 'easierdiss']]
# Do simple preprocessing
data['parsed']=data['content'].apply(provide_helperfunctions.extract_token)
# Remove stopwords and do stemming
data['parsed_cleaned']=data['parsed'].apply(lambda x:
provide_helperfunctions.remove_stopwords(
provide_helperfunctions.do_stemming(
provide_helperfunctions.remove_stopwords(x,stopwordsnltk)),stopwordsnltk))
### Build corpus ###
texts=[]
for row_index,row in data.iterrows():
item=row['parsed_cleaned']
texts.append(item)
### Add bigrams and trigrams ###
texts = provide_helperfunctions.add_bigrams(texts,bi_gram_mostcommon)
texts = provide_helperfunctions.add_trigrams(texts,tri_gram_mostcommon)
### Build the dictionary ###
corpus = [dictionary.doc2bow(text) for text in texts]
### Extract topic vectors ###
sent_topics_df = provide_helperfunctions.extract_vectors(ldamodel,int(num_topics),corpus)
data_lda = | pd.concat([data,sent_topics_df],axis=1, join='inner') | pandas.concat |
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_interval_dtype,
is_period_dtype,
is_scalar,
is_sparse,
union_categoricals,
)
from ..utils import is_arraylike, typename
from ._compat import PANDAS_GT_100
from .core import DataFrame, Index, Scalar, Series, _Frame
from .dispatch import (
categorical_dtype_dispatch,
concat,
concat_dispatch,
get_parallel_type,
group_split_dispatch,
hash_object_dispatch,
is_categorical_dtype_dispatch,
make_meta,
make_meta_obj,
meta_nonempty,
tolist_dispatch,
union_categoricals_dispatch,
)
from .extensions import make_array_nonempty, make_scalar
from .utils import (
_empty_series,
_nonempty_scalar,
_scalar_from_dtype,
is_categorical_dtype,
is_float_na_dtype,
is_integer_na_dtype,
)
##########
# Pandas #
##########
@make_scalar.register(np.dtype)
def _(dtype):
return _scalar_from_dtype(dtype)
@make_scalar.register(pd.Timestamp)
@make_scalar.register(pd.Timedelta)
@make_scalar.register(pd.Period)
@make_scalar.register(pd.Interval)
def _(x):
return x
@make_meta.register((pd.Series, pd.DataFrame))
def make_meta_pandas(x, index=None):
return x.iloc[:0]
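# Illustrative note (added): slicing with iloc[:0] keeps the schema but drops
# the rows, e.g. make_meta_pandas(pd.DataFrame({"a": [1, 2]})) returns an empty
# DataFrame that still has column "a" with dtype int64.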
@make_meta.register(pd.Index)
def make_meta_index(x, index=None):
return x[0:0]
meta_object_types = (pd.Series, pd.DataFrame, pd.Index, pd.MultiIndex)
try:
import scipy.sparse as sp
meta_object_types += (sp.spmatrix,)
except ImportError:
pass
@make_meta_obj.register(meta_object_types)
def make_meta_object(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')]) # doctest: +SKIP
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8')) # doctest: +SKIP
Series([], Name: a, dtype: float64)
>>> make_meta('i8') # doctest: +SKIP
1
"""
if is_arraylike(x) and x.shape:
return x[:0]
if index is not None:
index = make_meta(index)
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except Exception:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
@meta_nonempty.register(object)
def meta_nonempty_object(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError(
"Expected Pandas-like Index, Series, DataFrame, or scalar, "
"got {0}".format(typename(type(x)))
)
@meta_nonempty.register(pd.DataFrame)
def meta_nonempty_dataframe(x):
idx = meta_nonempty(x.index)
dt_s_dict = dict()
data = dict()
for i, c in enumerate(x.columns):
series = x.iloc[:, i]
dt = series.dtype
if dt not in dt_s_dict:
dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx)
data[i] = dt_s_dict[dt]
res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))
res.columns = x.columns
if PANDAS_GT_100:
res.attrs = x.attrs
return res
_numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)
@meta_nonempty.register(pd.Index)
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in _numeric_index_types:
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(["a", "b"], name=idx.name)
elif typ is pd.DatetimeIndex:
start = "1970-01-01"
# Need a non-monotonic decreasing index to avoid issues with
# partial string indexing see https://github.com/dask/dask/issues/2389
# and https://github.com/pandas-dev/pandas/issues/16515
# This doesn't mean `_meta_nonempty` should ever rely on
# `self.monotonic_increasing` or `self.monotonic_decreasing`
try:
return pd.date_range(
start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
except ValueError: # older pandas versions
data = [start, "1970-01-02"] if idx.freq is None else None
return pd.DatetimeIndex(
data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
elif typ is pd.PeriodIndex:
return pd.period_range(
start="1970-01-01", periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, "D")
try:
return pd.timedelta_range(
start=start, periods=2, freq=idx.freq, name=idx.name
)
except ValueError: # older pandas versions
start = np.timedelta64(1, "D")
data = [start, start + 1] if idx.freq is None else None
return pd.TimedeltaIndex(
data, start=start, periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.CategoricalIndex:
if len(idx.categories) == 0:
data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)
else:
data = pd.Categorical.from_codes(
[-1, 0], categories=idx.categories, ordered=idx.ordered
)
return pd.CategoricalIndex(data, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(l) for l in idx.levels]
codes = [[0, 0] for i in idx.levels]
try:
return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)
except TypeError: # older pandas versions
return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)
raise TypeError(
"Don't know how to handle index of type {0}".format(typename(type(idx)))
)
@meta_nonempty.register(pd.Series)
def _nonempty_series(s, idx=None):
# TODO: Use register dtypes with make_array_nonempty
if idx is None:
idx = _nonempty_index(s.index)
dtype = s.dtype
if len(s) > 0:
# use value from meta if provided
data = [s.iloc[0]] * 2
elif is_datetime64tz_dtype(dtype):
entry = pd.Timestamp("1970-01-01", tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = s.cat.categories[:0]
data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered)
elif is_integer_na_dtype(dtype):
data = pd.array([1, None], dtype=dtype)
elif is_float_na_dtype(dtype):
data = pd.array([1.0, None], dtype=dtype)
elif is_period_dtype(dtype):
# pandas 0.24.0+ should infer this to be Series[Period[freq]]
freq = dtype.freq
data = [pd.Period("2000", freq), pd.Period("2001", freq)]
elif is_sparse(dtype):
entry = _scalar_from_dtype(dtype.subtype)
if PANDAS_GT_100:
data = pd.array([entry, entry], dtype=dtype)
else:
data = pd.SparseArray([entry, entry], dtype=dtype)
elif is_interval_dtype(dtype):
entry = _scalar_from_dtype(dtype.subtype)
data = pd.array([entry, entry], dtype=dtype)
elif type(dtype) in make_array_nonempty._lookup:
data = make_array_nonempty(dtype)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
out = | pd.Series(data, name=s.name, index=idx) | pandas.Series |
# Import the required packages
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import streamlit as st
import pickle
from pickle import load
from PIL import Image
import seaborn as sns
import statsmodels.api as sm
import lime.lime_tabular
from sklearn.model_selection import train_test_split
import string
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
# Set Recursion Limit
import sys
sys.setrecursionlimit(40000)
import re
import nltk
import regex as re
from nltk.corpus import stopwords
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
import lightgbm as lgb
from lightgbm import LGBMClassifier
import streamlit.components.v1 as components
import tweepy
from collections import Counter
from wordcloud import WordCloud
import datetime
import plotly.express as px
import time
import pydeck as pdk
import SessionState  # Assuming SessionState.py lives in this folder
st.sidebar.title('Dashboard Control')
control = st.sidebar.radio('Navigation Bar', ('Home', 'Live Tweet Feed', 'Time Series Analysis', 'XAI'))
if control == 'Home':
### Sentiment Code goes here
st.markdown('<h1 style="color:#8D3DAF;text-align:center;font-family: Garamond, serif;"><b>RAKSHAK</b></h1>',unsafe_allow_html=True)
st.markdown('<h2 style="color:#E07C24;text-align:center;font-family: Georgia, serif;"><b>Time Series Sentiment Analysis Of Natural Hazard Relief Operations Through Social Media Data</b></h2>',unsafe_allow_html=True)
#st.markdown("The dashboard will help the government and humanitarian aid agencies to plan and coordinate the natural disaster relief efforts, resulting in more people being saved and more effective distribution of emergency supplies during a natural hazard")
st.header("Natural Hazard Data Collected Sample")
# Dataset
# Load the Dataset
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
names = [tweets1,tweets2,tweets3]
# Concatenate the datasets
tweets = pd.concat(names,ignore_index = True)
# Reshuffle the dataset
tweets = tweets.sample(frac = 1)
# Reindex the dataset
tweets['index'] = list(range(0,tweets.shape[0],1))
tweets.set_index('index', inplace=True)
tweets['type'] = tweets['type'].map({0: 'Need', 1: 'Availability', 2: 'Other'})
# Change column names for consistency
tweets.columns = ['text', 'type']
# Dataset Description
h = st.sidebar.slider('Select the number of tweets using the slider', 1, 100, 10)
data_tweets = tweets.sample(h)
data_tweets['index'] = list(range(0, h, 1))
data_tweets.set_index('index', inplace=True)
st.table(data_tweets)
# Checking for class balancing and get unique labels:
st.header("Count Of Tweets In Each Class")
chart_visual_class_balancing = st.sidebar.checkbox('Class Labels', True)
if chart_visual_class_balancing==True:
fig = plt.figure(figsize=(8, 4))
#sns.countplot(y=tweets.loc[:, 'type'],data=tweets).set_title("Count of tweets in each class")
fig = px.histogram(tweets, x="type",color="type",title="Count of tweets in each class")
st.plotly_chart(fig)
# Wordclouds
# Selection of Input & Output Variables
X = tweets.loc[:, 'text']
Y = tweets.loc[:, 'type']
X = list(X)
def preprocess_dataset(d):
# Define count variables
cnt=0
punctuation_count = 0
digit_count = 0
# Convert the corpus to lowercase
lower_corpus = []
for i in range(len(d)):
lower_corpus.append(" ".join([word.lower() for word in d[i].split()]))
# Remove any special symbol or punctuation
without_punctuation_corpus = []
for i in range(len(lower_corpus)):
p = []
for ch in lower_corpus[i]:
if ch not in string.punctuation:
p.append(ch)
else:
p.append(" ")
# Count of punctuation marks removed
punctuation_count += 1
x = ''.join(p)
if len(x) > 0:
without_punctuation_corpus.append(x)
# Remove urls with http, https or www and Retweets RT
without_url_corpus = []
for i in range(len(without_punctuation_corpus)):
text = without_punctuation_corpus[i]
text = re.sub(r"http\S*||www\S*", "", text)
text = re.sub(r"RT ", "", text)
without_url_corpus.append(text)
# Remove special characters and numbers from the corpus
without_digit_corpus = []
for i in range(len(without_url_corpus)):
p = []
for word in without_url_corpus[i].split():
if word.isalpha():
p.append(word)
else:
                    # Count of non-alphabetic tokens removed
digit_count += 1
x = ' '.join(p)
without_digit_corpus.append(x)
# Tokenize the corpus
# word_tokenize(s): Tokenize a string to split off punctuation other than periods
# With the help of nltk.tokenize.word_tokenize() method, we are able to extract the tokens
# from string of characters by using tokenize.word_tokenize() method.
# Tokenization was done to support efficient removal of stopwords
total_count = 0
tokenized_corpus = []
for i in without_digit_corpus:
tokenized_tweet = nltk.word_tokenize(i)
tokenized_corpus.append(tokenized_tweet)
# Count the length of tokenized corpus
total_count += len(list(tokenized_tweet))
# Remove Stopwords
stopw = stopwords.words('english')
count = 0
tokenized_corpus_no_stopwords = []
for i,c in enumerate(tokenized_corpus):
tokenized_corpus_no_stopwords.append([])
for word in c:
if word not in stopw:
tokenized_corpus_no_stopwords[i].append(word)
else:
count += 1
        # lemmatization and removing words that are too long or too short
lemmatized_corpus = []
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
ct = 0
cnt_final=0
dictt = {}
for i in range(0,len(tokenized_corpus_no_stopwords)):
lemmatized_corpus.append([])
for w in tokenized_corpus_no_stopwords[i]:
            # lemmatizing only those words whose length is greater than 2 and at most 10
            # i.e. keeping words longer than 2 characters and no longer than 10 characters
if(len(w)>2 and len(w)<=10):
lemmatized_corpus[i].append(lemmatizer.lemmatize(w))
cnt_final+=1
# Count of final corpus
# This is the length of total corpus that went through the process of lematization
ct+=1
############## Removing words of large and small length
# Doing a survey to find out the length of words so we can remove the too small and too large words from the Corpus
# plt.bar(*zip(*dictt.items()))
# plt.show()
# Punctuation Preprocessing
preprocessed_corpus = []
for i,c in enumerate(lemmatized_corpus):
preprocessed_corpus.append([])
for word in c:
x = ''.join([ch for ch in word if ch not in string.punctuation])
if len(x) > 0:
preprocessed_corpus[i].append(x)
# Clear unwanted data variables to save RAM due to memory limitations
del lower_corpus
del without_punctuation_corpus
del without_digit_corpus
del tokenized_corpus
del tokenized_corpus_no_stopwords
del lemmatized_corpus
return preprocessed_corpus
# Preprocess the Input Variables
preprocessed_corpus = preprocess_dataset(X)
data_corpus = []
for i in preprocessed_corpus:
data_corpus.append(" ".join([w for w in i]))
# Creating a word cloud
st.header("Wordclouds For Dataset")
fig, axes = plt.subplots(1, 2)
    # Wordcloud for the raw and processed datasets
words1 = ' '.join([tweet for tweet in X])
words2 = ' '.join([tweet for tweet in data_corpus])
wordCloud1 = WordCloud(background_color ='black').generate(words1)
wordCloud2 = WordCloud(background_color ='black').generate(words2)
# Display the generated image:
axes[0].title.set_text("Raw Dataset")
axes[0].imshow(wordCloud1)
axes[0].axis("off")
axes[1].title.set_text("Processed Dataset")
axes[1].imshow(wordCloud2)
axes[1].axis("off")
st.pyplot(fig)
# Create most used hashtags
st.header("Top Hashtag Used in the Datasets")
fig, axes = plt.subplots(1, 3)
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
X1 = list(tweets1.loc[:, 'text'])
X2 = list(tweets2.loc[:, 'text'])
X3 = list(tweets3.loc[:, 'text'])
dc1 = []
pd1 = preprocess_dataset(X1)
for i in pd1:
dc1 += i
c1 = Counter(dc1)
mfw1 = c1.most_common(10)
df1 = pd.DataFrame(mfw1)
df1.columns = ['Word', 'Count']
axes[0] = px.line(df1, x='Word', y='Count',title='Nepal Earthquake 2015',labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[0])
dc2 = []
pd2 = preprocess_dataset(X2)
for i in pd2:
dc2 += i
c2 = Counter(dc2)
mfw2 = c2.most_common(10)
df2 = pd.DataFrame(mfw2)
df2.columns = ['Word', 'Count']
axes[1] = px.line(df2, x='Word', y='Count',title='Italy Earthquake 2016', labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[1])
dc3 = []
pd3 = preprocess_dataset(X3)
for i in pd3:
dc3 += i
c3 = Counter(dc3)
mfw3 = c3.most_common(10)
df3 = pd.DataFrame(mfw3)
df3.columns = ['Word', 'Count']
axes[2] = px.line(df3, x='Word', y='Count',title='COVID-19',labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[2])
#df3.set_index('Word', inplace=True)
#axes[2].plot(df3['Count'], marker='o', linewidth=0.5,ls='solid', c='blue')
#axes[2].tick_params(axis ='x', rotation =-90)
#axes[2].set_xlabel('Hashtag')
#axes[2].set_ylabel('Number of Hashtag tweeted')
#axes[2].title.set_text("COVID-19")
st.header("Sentiments of Tweets Collected")
st.caption("Select Start & End Date to display Sentiments of tweets collected")
s_date = st.date_input("Start Date", min_value=datetime.datetime(2021, 4, 1), max_value=datetime.datetime(2021, 4, 30), value=datetime.datetime(2021, 4, 1))
e_date = st.date_input("End Date", min_value=datetime.datetime(2021, 4, 1), max_value=datetime.datetime(2021, 4, 30), value=datetime.datetime(2021, 4, 30))
data = pd.read_csv('sentiment_april.csv')[['Need','Availability']]
data_T = data.T
date1 = int(str(s_date)[8:])-1
date2 = int(str(e_date)[8:])
data_T["sum"] = data_T[list(range(date1,date2,1))].sum(axis=1)
l_name = ['Need', 'Availability']
l_value = data_T['sum']
pie_dict = {'name': l_name, 'value': l_value}
pie_df = pd.DataFrame(pie_dict)
fig_pie = px.pie(pie_df, values='value', names='name', title='Sentiments of tweet collected between '+str(s_date)+' and '+str(e_date))
st.plotly_chart(fig_pie)
# Show locations for tweets
st.header("Map for Location of Each User")
df = pd.read_csv('lat-long.csv')
df.columns = ['lat', 'lon', 'country']
st.map(df)
elif control == 'Live Tweet Feed':
    ### Live Tweet Feed goes here
st.markdown('<h1 style="color:#E07C24;;text-align:center;"><b>Live Tweet Feed</b></h1>',unsafe_allow_html=True)
st.header("Live Tweet Feed Sample")
    hashtag = str(st.text_input("Enter the keyword or hashtag for the live Twitter feed", "#coronavirus"))
fetch_tweets = st.button("Fetch Tweets")
####input your credentials here
consumer_key = "IE5dmFVlYdg5aNrsNnZiXZVPa"
consumer_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
if fetch_tweets:
# Current Time
current_time = time.time()
diff = 0
real_time = 0
live_tweet_text = []
live_tweet_date = []
live_tweet_id = []
lt_user_name = []
lt_user_location = []
lt_user_screenname=[]
lt_followers = []
lt_following = []
while(diff < 10):
for tweet in tweepy.Cursor(api.search_tweets,q=hashtag,count=10,lang="en",since="2021-12-11").items():
real_time = time.time()
diff = real_time - current_time
if diff >10:
break
if (not tweet.retweeted) and ('RT @' not in tweet.text):
#print(tweet,"\n\n\n\n\n")
live_tweet_text.append(tweet.text)
live_tweet_date.append(tweet.created_at)
live_tweet_id.append(tweet.id)
lt_user_name.append(tweet.user.name)
lt_user_location.append(tweet.user.location)
lt_user_screenname.append(tweet.user.screen_name)
lt_followers.append(str(tweet.user.followers_count))
lt_following.append(str(tweet.user.friends_count))
live_tweet_feed_dict = {'Tweet ID':live_tweet_id, 'Tweet': live_tweet_text, 'Date & Time': live_tweet_date, 'Username': lt_user_screenname, 'User Full Name': lt_user_name, 'Location': lt_user_location, 'Follower Count': lt_followers, 'Following Count': lt_following}
live_tweet_feed = pd.DataFrame(live_tweet_feed_dict)
st.dataframe(live_tweet_feed)
elif control == 'Time Series Analysis':
### Streamlit code starts here
st.markdown('<h1 style="color:#E07C24;;text-align:center;"><b>Time Series Analysis of Disaster Tweets</b></h1>',unsafe_allow_html=True)
### Time Series Code goes here
# Dataset
# Load the Dataset
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
names = [tweets1,tweets2,tweets3]
# Concatenate the datasets
tweets = pd.concat(names,ignore_index = True)
# Reshuffle the dataset
tweets = tweets.sample(frac = 1)
# Reindex the dataset
tweets['index'] = list(range(0,tweets.shape[0],1))
tweets.set_index('index', inplace=True)
tweets['type'] = tweets['type'].map({0: 'Need', 1: 'Availability', 2: 'Other'})
# Change column names for consistency
tweets.columns = ['text', 'type']
tweets['type'] = tweets['type'].map({'Need':0, 'Availability':1,'Other':2})
# Get all the labels used in the labelling column
label = tweets.type.unique()
print("Labels:", label)
# Remove label 2 from the list because not required for time series analysis
label = np.delete(label,np.where(label == 2))
print("Labels:", label)
# Add names to the numerical labels
label_name = []
for i in label:
if i == 0:
label_name.append("Need")
elif i == 1:
label_name.append("Availability")
# Choose interval
interval = 30
start_date = "2021-04-01"
# Create Timestamps with intervals
ds = pd.date_range(start=start_date, periods=interval)
dates = []
for i in ds:
dates.append(i.strftime('%m-%d-%Y'))
del ds
# Divide the Dataset into intervals
# Divide the dataset into the given number of intervals
num_of_tweets_per_interval = math.floor(tweets.shape[0]/interval)
# Create Time Series with intervals
data = []
count_of_data = []
for i in label:
count_of_data.append([])
for i in range(1,interval+1,1):
# Draw a sample from the tweets
tw = tweets.sample(n=num_of_tweets_per_interval, random_state=10, replace=False)
# Append the statistics of the drawn sample to the list
stat = dict()
for j in range(0,len(label)):
stat[label[j]] = list(tw['type']).count(label[j])
count_of_data[j].append(list(tw['type']).count(label[j]))
data.append(stat)
# Remove the already drawn tweets from the dataset
tweets.drop(labels=list(tw.index.values),inplace=True)
# Real Time Series starts here
# Load Dataset
df = pd.DataFrame(count_of_data).T
# Set Index
df['Date'] = pd.to_datetime(dates)
df.set_index('Date', inplace=True)
df.columns = ['Need', 'Availability']
# Delete this asap
count_of_data = [[44, 44, 54, 51, 42, 50, 48, 52, 44, 49, 50, 57, 44, 55, 52, 46, 49, 59, 44, 48, 56, 45, 54, 47, 49, 62, 45, 53, 43, 41], [42, 46, 33, 33, 35, 39, 33, 35, 36, 41, 37, 39, 38, 37, 37, 32, 32, 37, 44, 46, 43, 46, 33, 34, 44, 37, 45, 36, 39, 51]]
df = pd.read_csv('time_series.csv')
df['Date'] = pd.to_datetime(dates)
df.set_index('Date', inplace=True)
df.columns = ['Need', 'Availability']
# Delete above asap
st.title("Twitter Data Description")
chart_visual_tweets = st.selectbox('Select Chart/Plot type',
('Stacked Bar Chart', 'Side-by-Side Bar Chart', 'Line Chart'))
# Plot 1
if chart_visual_tweets=='Side-by-Side Bar Chart':
# set width of bars
barWidth = 0.25
# Set position of bar on X axis
r = [np.arange(interval)]
for i in range(1, len(label)):
r1 = [x + barWidth for x in r[-1]]
r.append(r1)
# Plotting a line plot after changing it's width and height
f = plt.figure()
f.set_figwidth(20)
f.set_figheight(8)
# Make the plot
for i,lab in enumerate(label):
plt.bar(r[i], count_of_data[i], width=barWidth, edgecolor='white', label=label_name[i])
# Add xticks on the middle of the group bars
plt.xlabel('Time Series', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(count_of_data[0]))], list(dates))
plt.tick_params(axis ='x', rotation =90)
# Create legend & Show graphic
plt.legend()
plt.show()
st.pyplot(f)
# Plot 2
if chart_visual_tweets=='Stacked Bar Chart':
# Plotting a line plot after changing it's width and height
f = plt.figure()
f.set_figwidth(20)
f.set_figheight(8)
b = np.zeros(interval)
for i,lab in enumerate(label):
plt.bar(dates, count_of_data[i],bottom=b, edgecolor='white', label=label_name[i])
b += np.array(count_of_data[i])
plt.xlabel('Time Series', fontweight='bold')
plt.tick_params(axis ='x', rotation =90)
# Create legend & Show graphic
plt.legend()
plt.show()
st.pyplot(f)
# Plot 3
if chart_visual_tweets=='Line Chart':
# Plotting a line plot after changing it's width and height
f = plt.figure()
f.set_figwidth(20)
f.set_figheight(8)
ls = ['dashed', 'solid']
for i,lab in enumerate(label):
plt.plot(count_of_data[i], label=label_name[i], linestyle=ls[i], marker='o')
plt.xlabel('Time Series', fontweight='bold')
plt.tick_params(axis ='x', rotation =90)
# Create legend & Show graphic
plt.legend()
plt.show()
st.pyplot(f)
################################### Time Series Analysis starts here
st.title("Time Series Analysis of Tweets")
chart_visual_time_series = st.radio('Select Need/Availability Label for Time series distribution',('Need', 'Availability'))
options = st.multiselect(
'Select options for Data Resampling',
['D', '3D', 'W', '2W'],
['D', '3D'])
    # y represents the Need label
    # z represents the Availability label
y = df['Need']
z = df['Availability']
if chart_visual_time_series=='Need':
fig, ax = plt.subplots(figsize=(20, 6))
if 'D' in options:
ax.plot(y, marker='o', linewidth=0.5, label='Daily',ls='solid', c='red')
if '3D' in options:
ax.plot(y.resample('3D').mean(),marker='o', markersize=8, linestyle='dashed', label='Half-Weekly Mean Resample')
if 'W' in options:
ax.plot(y.resample('W').mean(),marker='o', markersize=8, linestyle='-', label='Weekly Mean Resample')
if '2W' in options:
ax.plot(y.resample('2W').mean(),marker='o', markersize=8, linestyle='dotted', label='Bi-weekly Mean Resample')
ax.set_ylabel('Frequency')
ax.set_xlabel('Date')
ax.legend()
st.pyplot(fig)
if chart_visual_time_series=="Availability":
fig, ax = plt.subplots(figsize=(20, 6))
if 'D' in options:
ax.plot(z, marker='o', linewidth=0.5, label='Daily',ls='solid', c='red')
if '3D' in options:
ax.plot(z.resample('3D').mean(),marker='o', markersize=8, linestyle='dashed', label='Half-Weekly Mean Resample')
if 'W' in options:
ax.plot(z.resample('W').mean(),marker='o', markersize=8, linestyle='-', label='Weekly Mean Resample')
if '2W' in options:
ax.plot(z.resample('2W').mean(),marker='o', markersize=8, linestyle='dotted', label='Bi-weekly Mean Resample')
ax.set_ylabel('Frequency')
ax.set_xlabel('Date')
ax.legend()
st.pyplot(fig)
################################### Seasonal Decomposition starts here
# The next step is to decompose the data to view more of the complexity behind the linear visualization.
# A useful Python function called seasonal_decompose within the 'statsmodels' package can help us to decompose the data
# into four different components:
# Observed
    # Trend
# Seasonal
# Residual
st.title("Decompose the Data")
chart_visual_seasonal_decomposition = st.radio('Select Need/Availability Label for Seasonal decomposition',
('Need of resources', 'Availability of resources'))
    def seasonal_decompose(x):
decomposition_x = sm.tsa.seasonal_decompose(x, model='additive',extrapolate_trend='freq')
fig_x = decomposition_x.plot()
fig_x.set_size_inches(14,7)
plt.show()
st.pyplot(fig_x)
if chart_visual_seasonal_decomposition == "Need of resources":
seasonal_decompose(y)
elif chart_visual_seasonal_decomposition == "Availability of resources":
seasonal_decompose(z)
elif control == 'XAI':
### XAI - Explainable Artificial Intelligence
# Dataset
# Load Dataset
tweets = | pd.read_csv("dataset.csv",lineterminator='\n') | pandas.read_csv |
#
# multistagefingerprint.py
#
# Author(s):
# <NAME> <<EMAIL>>
#
# Copyright (c) 2018-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import numpy as np
from collections import namedtuple
import pandas as pd
from sklearn.metrics import confusion_matrix
import os
import pickle
from fingerprint import Fingerprint
from fingerprint import MultiStageFingerprint
from utils import read_cli, get_exp_dir, load_data, show_accuracy
def onehot2factor(df):
"""Convert one-hot encodings into categorical variables (strings)."""
q_names = Fingerprint.get_q_vars(df.columns)
q_factors = list(set([q.split('_')[0] for q in q_names]))
q_factors.sort()
for Qi in q_factors:
# compute string length that can represent all the levels of this factor
qn = [q for q in df.columns if q.startswith(Qi)]
n_chars = math.ceil(math.log10(len(qn) + 1)) # 2 digits if >= 10, 3 if >= 100, ...
# convert one-hot encoding to string
levels = np.argmax(df.loc[:, df.columns.isin(qn)].to_numpy(), axis=1) + 1
levels = [str(l).rjust(n_chars, '0') for l in levels]
# remove one-hot and insert strings
df = df.drop(labels=qn, axis=1)
n = int(Qi.replace('q', ''))
df.insert(n, Qi, levels)
return df
def prepare_msf_data(df_train, df_test):
"""Load and prepare data for fingerprint method."""
df_train = onehot2factor(df_train)
df_test = onehot2factor(df_test)
Bag = namedtuple('Bag', ['k', 'y', 'df'])
train_bags = [Bag(k=b[0], y=int(b[1]['y'].mode()), df=b[1].reset_index().drop(columns=['index', 'k', 'y'])) for b in df_train.groupby('k')]
b_train = [b.df for b in train_bags]
y_train = [b.y for b in train_bags]
if 'y' in df_test.columns:
test_bags = [Bag(k=b[0], y=int(b[1]['y'].mode()), df=b[1].reset_index().drop(columns=['index', 'k', 'y'])) for b in df_test.groupby('k')]
else:
test_bags = [Bag(k=b[0], y=-1, df=b[1].reset_index().drop(columns=['index', 'k'])) for b in df_test.groupby(df_test.k)]
b_test = [b.df for b in test_bags]
ky_test = | pd.DataFrame([(b.k, b.y) for b in test_bags], columns=['k', 'y']) | pandas.DataFrame |
# Data Science with SQL Server Quick Start Guide
# Chapter 05
# Imports
import numpy as np
import pandas as pd
import pyodbc
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as sc
# Handling NULLs
con = pyodbc.connect('DSN=AWDW;UID=RUser;PWD=<PASSWORD>')
query = """SELECT c1, c2, c3
FROM dbo.NULLTest;"""
NULLTest = pd.read_sql(query, con)
NULLTest
# Checking for NULLs
pd.isnull(NULLTest)
# Omitting
NULLTest.dropna(axis = 'rows')
NULLTest.dropna(axis = 'columns')
# Aggregate functions
NULLTest.c2.mean()
NULLTest.c2.mean(skipna = False)
# Reading the data from SQL Server
con = pyodbc.connect('DSN=AWDW;UID=RUser;PWD=<PASSWORD>')
query = """SELECT CustomerKey, CommuteDistance,
TotalChildren, NumberChildrenAtHome,
Gender, HouseOwnerFlag,
NumberCarsOwned, MaritalStatus,
Age, YearlyIncome, BikeBuyer,
EnglishEducation AS Education,
EnglishOccupation AS Occupation
FROM dbo.vTargetMail"""
TM = pd.read_sql(query, con)
# check the Age
TM["Age"].describe()
# Generating dummies (indicators)
pd.get_dummies(TM.MaritalStatus)
pd.get_dummies(TM.MaritalStatus, prefix = 'TM')
# Create the dummies
TM1 = TM[['MaritalStatus']].join(pd.get_dummies(TM.MaritalStatus, prefix = 'TM'))
TM1.tail(3)
# Show the Age in 20 equal width bins
TM['AgeEWB'] = pd.cut(TM['Age'], 20)
TM['AgeEWB'].value_counts()
pd.crosstab(TM.AgeEWB,
            columns = 'Count').plot(kind = 'bar',
                                    legend = False,
                                    title = 'AgeEWB20')
plt.show()
# Equal width binning - 5 bins
TM['AgeEWB'] = pd.cut(TM['Age'], 5)
TM['AgeEWB'].value_counts(sort = False)
# Equal height binning
TM['AgeEHB'] = pd.qcut(TM['Age'], 5)
TM['AgeEHB'].value_counts(sort = False)
# Custom binning
custombins = [16, 22, 29, 39, 54, 88]
TM['AgeCUB'] = | pd.cut(TM['Age'], custombins) | pandas.cut |
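# A short, illustrative extension (not part of the original chapter): pd.cut
# also accepts explicit labels for the custom bins defined above; the label
# strings here are an assumption chosen to mirror the bin edges
TM['AgeCUBLabeled'] = pd.cut(TM['Age'], custombins,
                             labels = ['16-22', '22-29', '29-39', '39-54', '54-88'])
TM['AgeCUBLabeled'].value_counts(sort = False)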
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'] = df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'] = df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'] = df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'] = df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'] = df_2018_2019['prcab'].fillna(0)
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
# #tmpHilla=df_2018_2019.columns
# tmpHilla=pd.DataFrame(df_2018_2019.columns.values.tolist())
# tmpHilla.to_csv("/tmp/pycharm_project_355/columns.csv")
# my_list = df_2010_2011.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2012_2013.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2014_2015.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2016_2017.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2018_2019.columns.values.tolist()
# print (my_list)
# print()
#-------------------merge all csv--------------------------
# dfMerge1 = pd.merge(df_2010_2011, df_2012_2013, on='surgorder')
# dfMerge2 = pd.merge(dfMerge1, df_2014_2015, on='surgorder')
# dfMerge = pd.merge(dfMerge2, df_2016_2017, on='surgorder')
#dfMerge = pd.merge(df_2010_2011, df_2012_2013, on='SiteID')
#count distinc
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
def groupby_siteid():
df_2010 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='siteid')
df2 =pd.merge(df1, df_2012, on='siteid')
df3 =pd.merge(df2, df_2013, on='siteid')
df4 =pd.merge(df3, df_2014, on='siteid')
df5 =pd.merge(df4, df_2015, on='siteid')
df6 =pd.merge(df5, df_2016, on='siteid')
df7 =pd.merge(df6, df_2017, on='siteid')
df8 =pd.merge(df7, df_2018, on='siteid')
df_sum_all_Years =pd.merge(df8, df_2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
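# Illustrative alternative (a sketch only, not used by the script): the ten
# per-year groupby/merge steps above can be expressed more compactly by
# concatenating the raw frames once and pivoting counts by siteid and surgyear.
def groupby_siteid_compact():
    frames = [df_2010_2011, df_2012_2013, df_2014_2015, df_2016_2017, df_2018_2019]
    all_years = pd.concat(frames, ignore_index=True)
    counts = all_years.groupby(['siteid', 'surgyear']).size().unstack(fill_value=0)
    counts['Distinct_years'] = counts.gt(0).sum(axis=1)
    counts['Year_sum'] = counts.drop(columns='Distinct_years').sum(axis=1)
    counts['Year_avg'] = counts['Year_sum'] / counts['Distinct_years']
    return counts.reset_index()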
def groupby_surgid():
df_2010 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='surgid')
df2 =pd.merge(df1, df_2012, on='surgid')
df3 =pd.merge(df2, df_2013, on='surgid')
df4 =pd.merge(df3, df_2014, on='surgid')
df5 =pd.merge(df4, df_2015, on='surgid')
df6 =pd.merge(df5, df_2016, on='surgid')
df7 =pd.merge(df6, df_2017, on='surgid')
df8 =pd.merge(df7, df_2018, on='surgid')
df_sum_all_Years =pd.merge(df8, df_2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years surgid.csv")
print()
print("details of surgid dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years surgid.csv")
print("num of doctors with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_hospid():
df_2010 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='hospid')
df2 =pd.merge(df1, df_2012, on='hospid')
df3 =pd.merge(df2, df_2013, on='hospid')
df4 =pd.merge(df3, df_2014, on='hospid')
df5 =pd.merge(df4, df_2015, on='hospid')
df6 =pd.merge(df5, df_2016, on='hospid')
df7 =pd.merge(df6, df_2017, on='hospid')
df8 =pd.merge(df7, df_2018, on='hospid')
df_sum_all_Years =pd.merge(df8, df_2019, on='hospid')
cols = df_sum_all_Years.columns.difference(['hospid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['hospid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years hospid.csv")
print(df_sum_all_Years)
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years hospid.csv")
print("num of hospital with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
return df_sum_all_Years
def draw_hist(data,num_of_bins,title,x_title,y_title,color):
plt.hist(data, bins=num_of_bins, color=color,ec="black")
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.show()
def group_by_count(group_by_value,name):
df_2010_2011_gb = df_2010_2011.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2012_2013_gb = df_2012_2013.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2014_2015_gb = df_2014_2015.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2016_2017_gb = df_2016_2017.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2018_2019_gb = df_2018_2019.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_merge_1=pd.merge(df_2010_2011_gb,df_2012_2013_gb, on=group_by_value)
df_merge_2=pd.merge(df_merge_1,df_2014_2015_gb, on=group_by_value)
df_merge_3=pd.merge(df_merge_2,df_2016_2017_gb, on=group_by_value)
df_merge_4=pd.merge(df_merge_3,df_2018_2019_gb, on=group_by_value)
cols = df_merge_4.columns.difference([group_by_value])
df_merge_4[name] = df_merge_4.loc[:,cols].sum(axis=1)
df_new=pd.DataFrame()
df_new[group_by_value] = df_merge_4[group_by_value]
df_new[name] = df_merge_4[name]
return df_new
def groupby_siteid_prcab():
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='siteid')
df2 = pd.merge(df1, df2012, on='siteid')
df3 = pd.merge(df2, df2013, on='siteid')
df4 = pd.merge(df3, df2014, on='siteid')
df5 = pd.merge(df4, df2015, on='siteid')
df6 = pd.merge(df5, df2016, on='siteid')
df7 = pd.merge(df6, df2017, on='siteid')
df8 = pd.merge(df7, df2018, on='siteid')
df_sum_all_Years = pd.merge(df8, df2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / df_sum_all_Years['Distinct_years_reop']
df_sum_all_Years.to_csv("sum all years siteid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2019_Firstop')
d1 = pd.merge(df_10, df_11, on='siteid')
d2 = pd.merge(d1, df_12, on='siteid')
d3 = | pd.merge(d2, df_13, on='siteid') | pandas.merge |
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..rice_exe import Rice
test = {}
class TestRice(unittest.TestCase):
"""
Unit tests for Rice.
"""
print("rice unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for rice tests
:return:
"""
pass
    def tearDown(self):
"""
Teardown routine for rice tests
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
# dsed * area * pb
# (self.dsed * self.area * self.pb)
def create_rice_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty rice object
rice_empty = Rice(df_empty, df_empty)
return rice_empty
def test_rice_msed_unit(self):
"""
Unit tests for calcmsed
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
rice_empty = self.create_rice_object()
try:
expected_results = [10.0, 2745.135 , 386.7105]
rice_empty.dsed = pd.Series([1.0, 5.3, 8.25], dtype='float')
rice_empty.area = pd.Series([10.0, 345.3, 23.437], dtype='float')
rice_empty.pb = pd.Series([1.0, 1.5, 2.0], dtype='float')
result = rice_empty.calc_msed()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
# (self.dw * self.area) + (self.dsed * self.osed * self.area)
def test_rice_vw_unit(self):
"""
Unit tests for calcvw
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
rice_empty = self.create_rice_object()
try:
expected_results = [25.89, 2414.5793, 415.6669]
rice_empty.dw = pd.Series([2.2, 4.56, 12.934], dtype='float')
rice_empty.area = pd.Series([10.0, 345.3, 23.437], dtype='float')
rice_empty.dsed = pd.Series([1.0, 5.3, 8.25], dtype='float')
rice_empty.osed = pd.Series([0.389, 0.459, 0.582], dtype='float')
result = rice_empty.calc_vw()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
# (self.mai/self.area)*10000
def test_rice_mass_area_unit(self):
"""
Unittests for calcmass_area
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
rice_empty = self.create_rice_object()
try:
expected_results = [8960.0, 2606.4292, 138567.2228]
rice_empty.area = pd.Series([10.0, 345.3, 23.437], dtype='float')
rice_empty.mai = pd.Series([8.96, 90.0, 324.76], dtype='float')
result = rice_empty.calc_mass_area()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
# (self.out_mass_area / (self.dw + (self.dsed * (self.osed + (self.pb * self.Kd*1e-5)))))*100
def test_rice_cw_unit(self):
"""
unittests for calccw
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
rice_empty = self.create_rice_object()
try:
expected_results = [346.0662, 1155.6686, 948.6060]
rice_empty.dw = pd.Series([2.2, 4.56, 12.934], dtype='float')
rice_empty.dsed = | pd.Series([1.0, 5.3, 8.25], dtype='float') | pandas.Series |
import abc
import os
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from abc import abstractmethod
from typing import Callable, Sequence
import tfwda.logger.standard
class IFPlotter(metaclass = abc.ABCMeta):
"""Interface for the Plotter
Methods (abstract)
------------------
plot(model_name str, flattened_weight list[np.ndarray], metadata collections.OrderedDict)
Responsible for plotting
"""
@abstractmethod
def plot(self, model_name: str, flattened_weight: list[np.ndarray], metadata: collections.OrderedDict):
"""Plotting the flattened model weights
Parameters
----------
model_name : str
Name of the model
flattened_weight : list[np.ndarray]
Flattened weights of the model
metadata : collections.OrderedDict
Metadata of the weights
"""
pass
class Plotter(IFPlotter):
"""The Plotter takes the weights as input and generates the corresponding histograms.
Parameters
----------
logger : logger.standard.Logger
Logger instance
path_to_dir : str
Directory where the plots are stored to
"""
def __init__(self, logger: tfwda.logger.standard.Logger, path_to_dir: str):
self.logger = logger
if not os.path.exists(path_to_dir):
self.logger.log(f"The folder {path_to_dir} does not exist, if you want to continue, press Y, if not, press N...", "Warning")
confirmation = input()
if not confirmation in ["Y", "Yes", "yes"]:
exit()
os.mkdir(path_to_dir)
self.path_to_dir = path_to_dir
def plot(self, model_name: str, flattened_weight: list[np.ndarray], metadata: collections.OrderedDict) -> None:
"""Plotting the weights and storing these to the location given by `path_to_dir`
Parameters
----------
model_name : str
Name of the model
flattened_weight : list[np.ndarray]
The flattened weights of the model
metadata : collections.OrderedDict
            Metadata of the weights
"""
for weight, name, shape, dtype in zip(flattened_weight, metadata['names'], metadata['shapes'], metadata['dtypes']):
df = pd.DataFrame({'weight': weight})
fig = px.histogram(df, x = "weight", labels = {'x': "Weights", 'y': "Frequency"})
name = name.replace("/", "_")
name = name.replace(":", "_")
fig.write_image(f"{self.path_to_dir}/{model_name}_{name}_{shape}_{dtype}.png")
@staticmethod
def display_hist(weight: list) -> None:
"""Is used for plotting a histogram weight plot, the plot is only display
and not stored
Parameters
----------
weight : list
A layer of a neural network contains N weights,
these weights are given as a list of values
"""
df = | pd.DataFrame({'weight': weight}) | pandas.DataFrame |
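# Illustrative usage of the Plotter above (a sketch under assumed names; the
# Logger constructor signature is an assumption, not taken from this module):
# logger = tfwda.logger.standard.Logger()
# plotter = Plotter(logger, path_to_dir="./plots")
# plotter.plot(model_name, flattened_weight, metadata)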
#! ./venv/bin/python
import os
import sys
import pandas as pd
from datetime import date
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
if sys.platform.startswith('win'):
import geckodriver_autoinstaller
geckodriver_autoinstaller.install()
script_dir = os.path.dirname(os.path.realpath(__file__))
user_data_path = script_dir + '/user_data.csv'
building_list = ['LEE', 'SEC', 'SOL', 'WEV', 'ZUE']
id_prefix = '_content_specialinterest_mtec_d-mtec_en_news_corona_coronavirus-tracking---attendance-at-d-mtec-facilities_jcr_content_par_container_'
def open_firefox(headless=True, verbose=True) -> webdriver.firefox.webdriver.WebDriver:
if verbose:
print('Opening Firefox...')
from selenium.webdriver.firefox.options import Options
_driver_options = Options()
if headless:
_driver_options.add_argument('-headless')
return webdriver.Firefox(executable_path='geckodriver', options=_driver_options)
def load_user_data() -> dict:
_dict = pd.read_csv(
user_data_path,
header=None,
index_col=0,
squeeze=True,
).to_dict()
return _dict
def setup() -> None:
import inquirer
if os.path.exists(user_data_path):
user_data = load_user_data()
questions = [
inquirer.Text(
'first_name',
message='Enter first name',
default=user_data['first_name'],
),
inquirer.Text(
'last_name',
message='Enter last name',
default=user_data['last_name'],
),
inquirer.Text(
'mail_address',
message='Enter e-mail address',
default=user_data['mail_address'],
),
inquirer.List(
'building',
message="Which building are you in?",
choices=building_list,
default=user_data['building']
),
inquirer.Text(
'floor',
message='Enter floor',
default=user_data['floor']
),
inquirer.Text(
'room',
message='Enter room number',
default=user_data['room'],
),
inquirer.Confirm(
'receive_copy',
message='Do you want to receive a copy via email?',
default=user_data['receive_copy'],
),
]
else:
questions = [
inquirer.Text(
'first_name',
message='Enter first name',
),
inquirer.Text(
'last_name',
message='Enter last name',
),
inquirer.Text(
'mail_address',
message='Enter e-mail address',
),
inquirer.List(
'building',
message="Which building are you in?",
choices=building_list,
),
inquirer.Text(
'floor',
message='Enter floor',
),
inquirer.Text(
'room',
message='Enter room number',
),
inquirer.Confirm(
'receive_copy',
message='Do you want to receive a copy via email?',
default=True,
),
]
user_data = inquirer.prompt(questions)
| pd.Series(user_data) | pandas.Series |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Load messages and categories from csv files into a pandas dataframe.
Arguments:
messages_filepath -- path to location of messages csv file
categories_filepath -- path to location of categories csv files
Returns:
df -- pandas dataframe with messages and categories
"""
# load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = pd.read_csv(categories_filepath)
# merge datasets
df = | pd.merge(messages, categories, how='outer', on='id') | pandas.merge |
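# A minimal save step might look like the sketch below; create_engine is
# imported above but not used in the visible excerpt, and the table name
# 'messages' is an assumption rather than something taken from the original file.
def save_data(df, database_filename):
    """Persist the merged dataframe to an SQLite database."""
    engine = create_engine('sqlite:///' + database_filename)
    df.to_sql('messages', engine, index=False, if_exists='replace')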
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('train.csv')
X = dataset.iloc[:, 1:4].values
y = dataset.iloc[:, 0].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 2] = labelencoder_X_1.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [2])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X, y)
dataset_test = | pd.read_csv('private_test_x.csv') | pandas.read_csv |
import csv
import json
from functools import total_ordering
from io import StringIO
from typing import Union
import pandas
from pandas import DataFrame
from canvasxpress.data.base import CXDataProfile, CXMatrixData
@total_ordering
class CXDataframeData(CXMatrixData):
"""
A CXData class dedicated to processing Python DataFrame, matrix-structured
data.
"""
__data: DataFrame = DataFrame()
"""
The data managed by an object of this class.
"""
@property
def dataframe(self) -> DataFrame:
"""
Provides the data managed by the object.
:returns: `DataFrame` The managed data.
"""
return self.__data
@dataframe.setter
def dataframe(
self,
value: Union[DataFrame, None] = None
) -> None:
"""
Sets the dataframe managed by the object.
:param value: `Union[DataFrame, None]`
`None` results in an empty `DataFrame`. A deepcopy will be made of
`DataFrame` values.
"""
self.data = value
@property
def data(self) -> dict:
"""
Provides the data managed by the object.
:returns: `DataFrame` The managed data.
"""
return self.dataframe.to_dict(orient="list")
@data.setter
def data(
self,
value: Union['CXDataframeData', DataFrame, dict, str, None] = None
) -> None:
"""
Sets the dataframe managed by the object.
:param value: `Union['CXDataframeData', DataFrame, dict, str, None]`
`None` results in an empty `DataFrame`. A deepcopy will be made of
`DataFrame` or equivalent values.
"""
if value is None:
self.__data = | DataFrame() | pandas.DataFrame |
"""Define the schema for the court summary report."""
from dataclasses import dataclass, field, fields
from typing import Any, Dict, Iterator, List, Optional
import desert
import pandas as pd
from ..utils import DataclassSchema, TimeField
@dataclass
class Sentence(DataclassSchema):
"""
A Sentence object.
Parameters
----------
sentence_type: str
The sentence type
sentence_dt: str
The date of the sentence
program_period: str, optional
The program period
sentence_length: str, optional
The length of the sentence
"""
sentence_type: str
sentence_dt: str = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True)
) # type: ignore
program_period: str = ""
sentence_length: str = ""
def __repr__(self) -> str:
"""Return a string representation of the object."""
cls = self.__class__.__name__
if not pd.isna(self.sentence_dt):
dt = self.sentence_dt.strftime("%m/%d/%y") # type: ignore
dt = f"'{dt}'"
else:
dt = "NaT"
s = f"sentence_dt={dt}, sentence_type='{self.sentence_type}'"
return f"{cls}({s})"
@dataclass
class Charge(DataclassSchema):
"""
A Charge object.
Parameters
----------
seq_no: int
the charge sequence number
statute: str
the statute
description: str, optional
description of the statute
grade: str, optional
the grade, e.g., felony, misdemeanor, etc.
disposition: str, optional
the disposition for the charge, if present
sentences: List[Sentence], optional
list of any sentences associated with the charge
"""
seq_no: str
statute: str
description: str = ""
grade: str = ""
disposition: str = ""
sentences: List[Sentence] = field(default_factory=list)
@property
def meta(self) -> Dict[str, Any]:
"""Return the meta information associated with the charge."""
exclude = ["sentences"]
return {
f.name: getattr(self, f.name)
for f in fields(self)
if f.name not in exclude
}
def __iter__(self) -> Iterator[Sentence]:
"""Iterate through the sentences."""
return iter(self.sentences)
def __len__(self) -> int:
"""Return the length of the sentences."""
return len(self.sentences)
def __getitem__(self, index: int) -> Sentence:
"""Index the sentences."""
return self.sentences.__getitem__(index)
def __repr__(self) -> str:
"""Return a string representation of the object."""
cls = self.__class__.__name__
cols = ["seq_no", "statute", "description"]
s = ", ".join([f"{col}='{getattr(self, col)}'" for col in cols])
s += f", num_sentences={len(self.sentences)}"
return f"{cls}({s})"
@dataclass
class Docket(DataclassSchema):
"""
A Docket object.
Parameters
----------
docket_number: str
The docket number
proc_status: str
The status of the docket proceedings
dc_no: str
The DC incident number
otn: str
The offense tracking number
county: str
The PA county where case is being conducted
status: str
The docket status as determined by the section on the court
summary, e.g., "Active", "Closed", etc.
extra: List[Any]
List of any additional header information
arrest_dt: str
The arrest date
psi_num: str, optional
Pre-sentence investigation number
prob_num: str, optional
The probation number
disp_date: str, optional
The date of disposition
disp_judge: str, optional
The disposition judge
def_atty: str, optional
The name of the defense attorney
legacy_no: str, optional
The legacy number for the docket
last_action: str, optional
The last action in the case
last_action_room: str, optional
The room where last action occurred
next_action: str, optional
The next action to occur
next_action_room: str, optional
The room where next action will occur
next_action_date: str, optional
The date of the next action
trial_dt: str, optional
The date of the trial
last_action_date: str, optional
The date of the last action
    charges: List[Charge], optional
A list of charges associated with this case
"""
docket_number: str
proc_status: str
dc_no: str
otn: str
county: str
status: str
extra: List[Any]
arrest_dt: str = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True)
) # type: ignore
psi_num: str = ""
prob_num: str = ""
disp_judge: str = ""
def_atty: str = ""
legacy_no: str = ""
last_action: str = ""
last_action_room: str = ""
next_action: str = ""
next_action_room: str = ""
next_action_date: Optional[str] = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True), default=""
) # type: ignore
last_action_date: Optional[str] = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True), default=""
) # type: ignore
trial_dt: Optional[str] = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True), default=""
) # type: ignore
disp_date: Optional[str] = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True), default=""
) # type: ignore
charges: List[Charge] = field(default_factory=list)
def to_pandas(self) -> pd.DataFrame:
"""Return a dataframe representation of the data."""
# Each row is a Charge
out = pd.DataFrame([c.to_dict() for c in self])
# Convert sentences dicts to Sentence objects
out["sentences"] = out["sentences"].apply(
lambda l: [Sentence(**v) for v in l]
)
return out
@property
def meta(self) -> Dict[str, Any]:
"""Return the meta information associated with the docket."""
exclude = ["charges"]
return {
f.name: getattr(self, f.name)
for f in fields(self)
if f.name not in exclude
}
def __getitem__(self, index: int) -> Charge:
"""Index the charges."""
return self.charges.__getitem__(index)
def __iter__(self) -> Iterator[Charge]:
"""Iterate through the charges."""
return iter(self.charges)
def __len__(self) -> int:
"""Return the number of charges."""
return len(self.charges)
def __repr__(self) -> str:
"""Return a string representation of the object."""
cls = self.__class__.__name__
if not | pd.isna(self.arrest_dt) | pandas.isna |
import pandas as pd
import pickle
import numpy as np
import optparse
import os
import h5py
from pathlib import Path
from collections import OrderedDict
from typing import Union, Tuple, Dict, Optional, List
from functools import partial
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold
from sklearn.cluster import k_means
from lumin.data_processing.hep_proc import calc_pair_mass, proc_event
from lumin.data_processing.pre_proc import fit_input_pipe, proc_cats
from lumin.data_processing.file_proc import df2foldfile
from lumin.utils.misc import ids2unique, str2bool
def calc_pair_transverse_mass(df:pd.DataFrame, masses:Union[Tuple[float,float],Tuple[np.ndarray,np.ndarray]], feat_map:Dict[str,str]) -> np.ndarray:
r'''
Vectorised computation of invarient transverse mass of pair of particles with given masses, using transverse components of 3-momenta.
Only works for vectors defined in Cartesian coordinates.
Arguments:
df: DataFrame vector components
masses: tuple of masses of particles (either constant or different pair of masses per pair of particles)
feat_map: dictionary mapping of requested momentum components to the features in df
Returns:
np.array of invarient masses
'''
# TODO: rewrite to not use a DataFrame for holding parent vector
# TODO: add inplace option
# TODO: extend to work on pT, eta, phi coordinates
tmp = pd.DataFrame()
tmp['0_E'] = np.sqrt((masses[0]**2)+np.square(df.loc[:, feat_map['0_px']])+np.square(df.loc[:, feat_map['0_py']]))
tmp['1_E'] = np.sqrt((masses[1]**2)+np.square(df.loc[:, feat_map['1_px']])+np.square(df.loc[:, feat_map['1_py']]))
tmp['p_px'] = df.loc[:, feat_map['0_px']]+df.loc[:, feat_map['1_px']]
tmp['p_py'] = df.loc[:, feat_map['0_py']]+df.loc[:, feat_map['1_py']]
tmp['p_E'] = tmp.loc[:, '0_E']+tmp.loc[:, '1_E']
tmp['p_p2'] = np.square(tmp.loc[:, 'p_px'])+np.square(tmp.loc[:, 'p_py'])
tmp['p_mass'] = np.sqrt(np.square(tmp.loc[:, 'p_E'])-tmp.loc[:, 'p_p2'])
return tmp.p_mass.values
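# Editor's note: the quantity computed above is the transverse mass
# m_T = sqrt((E_T0 + E_T1)^2 - (px0 + px1)^2 - (py0 + py1)^2), with E_Ti = sqrt(m_i^2 + px_i^2 + py_i^2).
def _editor_demo_calc_pair_transverse_mass():
    # Minimal self-contained check (column names are illustrative): two massless particles
    # with back-to-back 10 GeV transverse momenta give m_T = 20.
    demo = pd.DataFrame({'a_px': [10.0], 'a_py': [0.0], 'b_px': [-10.0], 'b_py': [0.0]})
    return calc_pair_transverse_mass(
        demo, (0, 0),
        feat_map={'0_px': 'a_px', '0_py': 'a_py', '1_px': 'b_px', '1_py': 'b_py'})  # -> array([20.])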
def add_mass_feats(df:pd.DataFrame) -> None:
'''Add extra mass features used by Melis https://pdfs.semanticscholar.org/01e7/aee90cb61178fcb09d3fa813294a216116a2.pdf'''
# ln(1 + m_inv(tau, jet_0))
m = calc_pair_mass(df, (0,0), feat_map={'0_px':'PRI_tau_px', '0_py':'PRI_tau_py', '0_pz':'PRI_tau_pz',
'1_px':'PRI_jet_leading_px', '1_py':'PRI_jet_leading_py', '1_pz':'PRI_jet_leading_pz'})
df['EXT_m_tj0'] = np.log(1+m)
# ln(1 + m_inv(tau, jet_1))
m = calc_pair_mass(df, (0,0), feat_map={'0_px':'PRI_tau_px', '0_py':'PRI_tau_py', '0_pz':'PRI_tau_pz',
'1_px':'PRI_jet_subleading_px', '1_py':'PRI_jet_subleading_py', '1_pz':'PRI_jet_subleading_pz'})
df['EXT_m_tj1'] = np.log(1+m)
# ln(1 + m_inv(tau, lep))
m = calc_pair_mass(df, (0,0), feat_map={'0_px':'PRI_tau_px', '0_py':'PRI_tau_py', '0_pz':'PRI_tau_pz',
'1_px':'PRI_lep_px', '1_py':'PRI_lep_py', '1_pz':'PRI_lep_pz'})
df['EXT_m_tl'] = np.log(1+m)
# ln(1 + mt_inv(tau, jet_0))
m = calc_pair_transverse_mass(df, (0,0), feat_map={'0_px':'PRI_tau_px', '0_py':'PRI_tau_py',
'1_px':'PRI_jet_leading_px', '1_py':'PRI_jet_leading_py'})
df['EXT_mt_tj0'] = np.log(1+m)
# ln(1 + mt_inv(tau, jet_1))
m = calc_pair_transverse_mass(df, (0,0), feat_map={'0_px':'PRI_tau_px', '0_py':'PRI_tau_py',
'1_px':'PRI_jet_subleading_px', '1_py':'PRI_jet_subleading_py'})
df['EXT_mt_tj1'] = np.log(1+m)
def import_data(data_path:Path=Path("../data/"),
rotate:bool=False, flip_y:bool=False, flip_z:bool=False, cartesian:bool=True,
mode:str='OpenData',
val_size:float=0.2, seed:Optional[int]=None, cat_feats:Optional[List[str]]=None, extra:bool=False):
'''Import and split data from CSV(s)'''
if cat_feats is None: cat_feats = []
if mode == 'OpenData': # If using data from CERN Open Access
data = pd.read_csv(data_path/'atlas-higgs-challenge-2014-v2.csv')
data.rename(index=str, columns={"KaggleWeight": "gen_weight", 'PRI_met': 'PRI_met_pt'}, inplace=True)
data.drop(columns=['Weight'], inplace=True)
training_data = pd.DataFrame(data.loc[data.KaggleSet == 't'])
training_data.drop(columns=['KaggleSet'], inplace=True)
test = pd.DataFrame(data.loc[(data.KaggleSet == 'b') | (data.KaggleSet == 'v')])
test['private'] = 0
test.loc[(data.KaggleSet == 'v'), 'private'] = 1
test['gen_target'] = 0
test.loc[test.Label == 's', 'gen_target'] = 1
test.drop(columns=['KaggleSet', 'Label'], inplace=True)
else: # If using data from Kaggle
training_data = | pd.read_csv(data_path/'training.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Important Variable Selection with SNPs
Created on Fri Jan 31 16:31:01 2020
@author: <NAME>
"""
# Import the libraries
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVR
from sklearn.linear_model import MultiTaskLassoCV, MultiTaskElasticNetCV, LassoCV, ElasticNetCV, MultiTaskElasticNet, MultiTaskLasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
# Using chunk size to read rice data
def read_x_cont():
chunksize = 100
X_ct = pd.DataFrame()
for chunk in pd.read_csv("X_cont_ls_el.csv",low_memory=False, chunksize=chunksize, memory_map=True):
X_ct = pd.concat([X_ct, chunk])
return(X_ct)
# Function of data preprocessing
def process_variable(X, y):
# Drop 'IID' columns
X = X.drop('IID', axis = 1)
# Split data to training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)
# Convert from integer to float
X_train= X_train.astype(float, 32)
X_test = X_test.astype(float, 32)
# Apply the same scaling to both datasets
scaler = StandardScaler()
X_train_scl = scaler.fit_transform(X_train)
X_test_scl = scaler.transform(X_test) # we transform rather than fit_transform
return(X_train_scl, X_test_scl, y_train, y_test)
"""Random Forest Regressor"""
#Function to run random forest with grid search and k-fold cross-validation.
def get_rf_model(X_train, y_train, X_test, y_test):
# Hyperparameters search grid
rf_param_grid = {'bootstrap': [False, True],
'n_estimators': [60, 70, 80, 90, 100],
'max_features': [0.6, 0.65, 0.7, 0.75, 0.8],
'min_samples_leaf': [1],
'min_samples_split': [2]
}
# Instantiate random forest regressor
rf_estimator = RandomForestRegressor(random_state=None)
# Create the GridSearchCV object
rf_model = GridSearchCV(estimator=rf_estimator, param_grid=rf_param_grid, cv=10, scoring='neg_mean_squared_error', n_jobs=-1, iid = True)
# Train the regressor
rf_model.fit(X_train, y_train)
# Get the best model
rf_model_best = rf_model.best_estimator_
# Make predictions using the optimised parameters
rf_pred = rf_model_best.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, rf_pred)
# Find r-squared
r2 = r2_score(y_test, rf_pred)
best_prs = rf_model.best_params_
print("Best Parameters:\n", rf_model.best_params_)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
return(mse, r2, best_prs)
"""Support Vector Regressor"""
#Function to run support vector machine with grid search and k-fold cross-validation.
def get_svm_model(X_train, y_train, X_test, y_test):
# Parameter grid
svm_param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001, 10], "kernel": ["rbf"]}
# Create SVM grid search regressor
svm_grid = GridSearchCV(estimator = SVR(), param_grid= svm_param_grid, cv=10, scoring='neg_mean_squared_error', n_jobs=-1, iid = True)
# Train the regressor
svm_grid.fit(X_train, y_train)
# Get the best model
svm_model_best = svm_grid.best_estimator_
# Make predictions using the optimised parameters
svm_pred = svm_model_best.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, svm_pred)
# Find r-squared
r2 = r2_score(y_test, svm_pred)
best_prs = svm_grid.best_params_
print("Best Parameters:\n", svm_grid.best_params_)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
return(mse, r2, best_prs)
"""Lasso and Multi Task Lasso"""
#Lasso
def get_lasso_cv(X_train, y_train, X_test, y_test, cols):
# Create Lasso CV
ls_grid = LassoCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
ls_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
ls_pred = ls_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, ls_pred)
# Find r-squared
r2 = r2_score(y_test, ls_pred)
best_prs = ls_grid.alpha_
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
# Get coefficients of the model
coef = pd.DataFrame(ls_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Lasso picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Multi-task Lasso
def get_multitask_lasso_cv(X_train, y_train, X_test, y_test, cols):
# Create Multi-task Lasso CV
ls_grid = MultiTaskLassoCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
ls_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
ls_pred = ls_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, ls_pred)
# Find r-squared
r2 = r2_score(y_test, ls_pred)
best_prs = ls_grid.alpha_
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
# Get coefficients of the model
coef = pd.DataFrame(ls_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Multit-task Lasso picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
"""Elastic Net and Multi Task Elastic Net"""
# Elastic Net
def get_elasticnet_cv(X_train, y_train, X_test, y_test, cols):
# Create Elastic Net CV
el_grid = ElasticNetCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
el_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
el_pred = el_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, el_pred)
# Find r-squared
r2 = r2_score(y_test, el_pred)
best_prs = [el_grid.alpha_]
best_prs.append(el_grid.l1_ratio_)
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r-squared:', r2)
# Get coefficients of the model
coef = pd.DataFrame(el_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("ElasticNet picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Multi-task Elastic Net
def get_multitask_elasticnet_cv(X_train, y_train, X_test, y_test, cols):
# Create Multi Task Elastic Net CV
el_grid = MultiTaskElasticNetCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
el_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
el_pred = el_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, el_pred)
# Find r-squared
r2 = r2_score(y_test, el_pred)
best_prs = [el_grid.alpha_]
best_prs.append(el_grid.l1_ratio_)
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r-squared:', r2)
# Get coefficients of the model
coef = pd.DataFrame(el_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Multi-task ElasticNet picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Evaluate each trait separately with multi-task Lasso
def eval_mtls_split_trait(alpha, X_train, Y_train, X_test, Y_test):
# Create Multi-Task Lasso
ls_tfl_grw = MultiTaskLasso(alpha, random_state = 0)
# Train the regressor
ls_tfl_grw.fit(X_train, Y_train)
# Make predictions using the optimised parameters
ls_pred = ls_tfl_grw.predict(X_test)
# Find mean squared error
mse_tfl = mean_squared_error(Y_test[:, 0], ls_pred[:, 0])
mse_grw= mean_squared_error(Y_test[:, 1], ls_pred[:, 1])
# Find r-squared
r2_tfl = r2_score(Y_test[:, 0], ls_pred[:, 0])
r2_grw = r2_score(Y_test[:, 1], ls_pred[:, 1])
return(mse_tfl, mse_grw, r2_tfl, r2_grw)
# Evaluate each trait separately with multi-task Elastic Net
def eval_mtel_split_trait(alpha, l1_ratio, X_train, Y_train, X_test, Y_test):
# Create Multi-Task Lasso
el_tfl_grw = MultiTaskElasticNet(alpha, l1_ratio, random_state = 0)
# Train the regressor
el_tfl_grw.fit(X_train, Y_train)
# Make predictions using the optimised parameters
el_pred = el_tfl_grw.predict(X_test)
# Find mean squared error
mse_tfl = mean_squared_error(Y_test[:, 0], el_pred[:, 0])
mse_grw= mean_squared_error(Y_test[:, 1], el_pred[:, 1])
# Find r-squared
r2_tfl = r2_score(Y_test[:, 0], el_pred[:, 0])
r2_grw = r2_score(Y_test[:, 1], el_pred[:, 1])
return(mse_tfl, mse_grw, r2_tfl, r2_grw)
if __name__ == '__main__':
print("")
print("")
print("|============================================================================|")
print("| |")
print("| ----- IMPORTANT VARIABLE SELECTION WITH SNPS ----- |")
print("| |")
print("|============================================================================|")
print("")
print("")
print("********************************* INPUT DATA *********************************")
print("")
print("Import data may take several minutes, please wait...")
print("")
# Import data
X_cont = read_x_cont()
cols = X_cont.columns[1::]
# Load data after pre-processinng
y_tfl = pd.read_csv("y_tfl.csv", header=None)
y_grw = pd.read_csv("y_grw.csv", header=None)
y_tfl_grw = pd.read_csv("y_tfl_grw.csv", header=None)
X_grw_2 = pd.read_csv("X_grw_2.csv", header='infer')
X_grw_3 = pd.read_csv("X_grw_3.csv", header='infer')
X_grw_4 = pd.read_csv("X_grw_4.csv", header='infer')
X_grw_5 = pd.read_csv("X_grw_5.csv", header='infer')
X_tfl_2 = pd.read_csv("X_tfl_2.csv", header='infer')
X_tfl_3 = pd.read_csv("X_tfl_3.csv", header='infer')
X_tfl_4 = pd.read_csv("X_tfl_4.csv", header='infer')
X_tfl_5 = pd.read_csv("X_tfl_5.csv", header='infer')
X_tfl_6 = pd.read_csv("X_tfl_6.csv", header='infer')
X_tfl_grw_2 = pd.read_csv("X_tfl_grw_2.csv", header='infer')
X_tfl_grw_25 = pd.read_csv("X_tfl_grw_25.csv", header='infer')
X_tfl_grw_1 = pd.read_csv("X_tfl_grw_1.csv", header='infer')
X_tfl_grw_75 = pd.read_csv("X_tfl_grw_75.csv", header='infer')
X_tfl_grw_3 = pd.read_csv("X_tfl_grw_3.csv", header='infer')
print("")
# Transform response variables to matrix type.
y_tfl = y_tfl.values.ravel()
y_grw = y_grw.values.ravel()
y_tfl_grw = y_tfl_grw.values
# Normalize rice data
X_grw_2_train, X_grw_2_test, y_grw_2_train, y_grw_2_test = process_variable(X_grw_2, y_grw)
X_grw_3_train, X_grw_3_test, y_grw_3_train, y_grw_3_test = process_variable(X_grw_3, y_grw)
X_grw_4_train, X_grw_4_test, y_grw_4_train, y_grw_4_test = process_variable(X_grw_4, y_grw)
X_grw_5_train, X_grw_5_test, y_grw_5_train, y_grw_5_test = process_variable(X_grw_5, y_grw)
X_tfl_2_train, X_tfl_2_test, y_tfl_2_train, y_tfl_2_test = process_variable(X_tfl_2, y_tfl)
X_tfl_3_train, X_tfl_3_test, y_tfl_3_train, y_tfl_3_test = process_variable(X_tfl_3, y_tfl)
X_tfl_4_train, X_tfl_4_test, y_tfl_4_train, y_tfl_4_test = process_variable(X_tfl_4, y_tfl)
X_tfl_5_train, X_tfl_5_test, y_tfl_5_train, y_tfl_5_test = process_variable(X_tfl_5, y_tfl)
X_tfl_6_train, X_tfl_6_test, y_tfl_6_train, y_tfl_6_test = process_variable(X_tfl_6, y_tfl)
X_tfl_grw_2_train, X_tfl_grw_2_test, y_tfl_grw_2_train, y_tfl_grw_2_test = process_variable(X_tfl_grw_2, y_tfl_grw)
X_tfl_grw_25_train, X_tfl_grw_25_test, y_tfl_grw_25_train, y_tfl_grw_25_test = process_variable(X_tfl_grw_25, y_tfl_grw)
X_tfl_grw_1_train, X_tfl_grw_1_test, y_tfl_grw_1_train, y_tfl_grw_1_test = process_variable(X_tfl_grw_1, y_tfl_grw)
X_tfl_grw_75_train, X_tfl_grw_75_test, y_tfl_grw_75_train, y_tfl_grw_75_test = process_variable(X_tfl_grw_75, y_tfl_grw)
X_tfl_grw_3_train, X_tfl_grw_3_test, y_tfl_grw_3_train, y_tfl_grw_3_test = process_variable(X_tfl_grw_3, y_tfl_grw)
X_grw_train, X_grw_test, y_grw_train, y_grw_test = process_variable(X_cont, y_grw)
X_tfl_train, X_tfl_test, y_tfl_train, y_tfl_test = process_variable(X_cont, y_tfl)
X_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_train, y_tfl_grw_test = process_variable(X_cont, y_tfl_grw)
print("")
print("******************************* TRAINING MODELS *****************************")
print("")
rf_grw_mse = []
rf_grw_r2 = []
rf_tfl_mse = []
rf_tfl_r2 = []
rf_grw_prs = []
rf_tfl_prs = []
rf_tfl_grw_mse_0 = []
rf_tfl_grw_r2_0 = []
rf_tfl_grw_prs_0 = []
rf_tfl_grw_mse_1 = []
rf_tfl_grw_r2_1 = []
rf_tfl_grw_prs_1 = []
svr_grw_mse = []
svr_grw_r2 = []
svr_tfl_mse = []
svr_tfl_r2 = []
svr_grw_prs = []
svr_tfl_prs = []
svr_tfl_grw_mse_0 = []
svr_tfl_grw_r2_0 = []
svr_tfl_grw_prs_0 = []
svr_tfl_grw_mse_1 = []
svr_tfl_grw_r2_1 = []
svr_tfl_grw_prs_1 = []
# Filtering variables by p_value.
p_value = ['<=5e-6', '<=5e-5', '<=5e-4', '<=5e-3', '<=5e-2']
p_value_2 = ['<=5e-3','<=7.5e-3', '<=1e-2', '<=2.5e-2', '<=5e-2']
print("Find mse and r-squared for random forest model of grain weight...")
rf_grw_mse_2, rf_grw_r2_2, rf_grw_prs_2 = get_rf_model(X_grw_2_train, y_grw_2_train, X_grw_2_test, y_grw_2_test)
rf_grw_mse.append(rf_grw_mse_2)
rf_grw_r2.append(rf_grw_r2_2)
rf_grw_prs.append(rf_grw_prs_2)
rf_grw_mse_3, rf_grw_r2_3, rf_grw_prs_3 = get_rf_model(X_grw_3_train, y_grw_3_train, X_grw_3_test, y_grw_3_test)
rf_grw_mse.append(rf_grw_mse_3)
rf_grw_r2.append(rf_grw_r2_3)
rf_grw_prs.append(rf_grw_prs_3)
rf_grw_mse_4, rf_grw_r2_4, rf_grw_prs_4 = get_rf_model(X_grw_4_train, y_grw_4_train, X_grw_4_test, y_grw_4_test)
rf_grw_mse.append(rf_grw_mse_4)
rf_grw_r2.append(rf_grw_r2_4)
rf_grw_prs.append(rf_grw_prs_4)
rf_grw_mse_5, rf_grw_r2_5, rf_grw_prs_5 = get_rf_model(X_grw_5_train, y_grw_5_train, X_grw_5_test, y_grw_5_test)
rf_grw_mse.append(rf_grw_mse_5)
rf_grw_r2.append(rf_grw_r2_5)
rf_grw_prs.append(rf_grw_prs_5)
rf_grw = pd.DataFrame({'rf_grw_mse':rf_grw_mse[::-1], 'rf_grw_r2':rf_grw_r2[::-1], 'rf_grw_prs':rf_grw_prs[::-1]})
rf_grw.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
rf_grw.to_csv('rf_grw.csv')
print('RF of grain weight is saved')
print("Find mse and r-squared for random forest model of time to flowering...")
rf_tfl_mse_2, rf_tfl_r2_2, rf_tfl_prs_2 = get_rf_model(X_tfl_2_train, y_tfl_2_train, X_tfl_2_test, y_tfl_2_test)
rf_tfl_mse.append(rf_tfl_mse_2)
rf_tfl_r2.append(rf_tfl_r2_2)
rf_tfl_prs.append(rf_tfl_prs_2)
rf_tfl_mse_3, rf_tfl_r2_3, rf_tfl_prs_3 = get_rf_model(X_tfl_3_train, y_tfl_3_train, X_tfl_3_test, y_tfl_3_test)
rf_tfl_mse.append(rf_tfl_mse_3)
rf_tfl_r2.append(rf_tfl_r2_3)
rf_tfl_prs.append(rf_tfl_prs_3)
rf_tfl_mse_4, rf_tfl_r2_4, rf_tfl_prs_4 = get_rf_model(X_tfl_4_train, y_tfl_4_train, X_tfl_4_test, y_tfl_4_test)
rf_tfl_mse.append(rf_tfl_mse_4)
rf_tfl_r2.append(rf_tfl_r2_4)
rf_tfl_prs.append(rf_tfl_prs_4)
rf_tfl_mse_5, rf_tfl_r2_5, rf_tfl_prs_5 = get_rf_model(X_tfl_5_train, y_tfl_5_train, X_tfl_5_test, y_tfl_5_test)
rf_tfl_mse.append(rf_tfl_mse_5)
rf_tfl_r2.append(rf_tfl_r2_5)
rf_tfl_prs.append(rf_tfl_prs_5)
rf_tfl_mse_6, rf_tfl_r2_6, rf_tfl_prs_6 = get_rf_model(X_tfl_6_train, y_tfl_6_train, X_tfl_6_test, y_tfl_6_test)
rf_tfl_mse.append(rf_tfl_mse_6)
rf_tfl_r2.append(rf_tfl_r2_6)
rf_tfl_prs.append(rf_tfl_prs_6)
rf_tfl = pd.DataFrame({'rf_tfl_mse':rf_tfl_mse[::-1], 'rf_tfl_r2':rf_tfl_r2[::-1], 'rf_tfl_prs':rf_tfl_prs[::-1]})
rf_tfl.set_index(pd.Index(p_value), 'p_value', inplace = True)
rf_tfl.to_csv('rf_tfl.csv')
print('RF of time to flowering is saved')
print("Find mse and r-squared for random forest model of time to flowering and grain weight...")
# Output is time to flowering
rf_tfl_grw_mse_2_0, rf_tfl_grw_r2_2_0, rf_tfl_grw_prs_2_0 = get_rf_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 0], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_2_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_2_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_2_0)
rf_tfl_grw_mse_25_0, rf_tfl_grw_r2_25_0, rf_tfl_grw_prs_25_0 = get_rf_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 0], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_25_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_25_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_25_0)
rf_tfl_grw_mse_1_0, rf_tfl_grw_r2_1_0, rf_tfl_grw_prs_1_0 = get_rf_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 0], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_1_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_1_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_1_0)
rf_tfl_grw_mse_75_0, rf_tfl_grw_r2_75_0, rf_tfl_grw_prs_75_0 = get_rf_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 0], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_75_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_75_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_75_0)
rf_tfl_grw_mse_3_0, rf_tfl_grw_r2_3_0, rf_tfl_grw_prs_3_0 = get_rf_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 0], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_3_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_3_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_3_0)
rf_tfl_grw_0 = pd.DataFrame({'rf_tfl_grw_mse_0':rf_tfl_grw_mse_0[::-1], 'rf_tfl_grw_r2_0':rf_tfl_grw_r2_0[::-1], 'rf_tfl_grw_prs_0':rf_tfl_grw_prs_0[::-1]})
rf_tfl_grw_0.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
rf_tfl_grw_0.to_csv('rf_tfl_grw_0.csv')
# Output is grain weight
rf_tfl_grw_mse_2_1, rf_tfl_grw_r2_2_1, rf_tfl_grw_prs_2_1 = get_rf_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 1], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_2_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_2_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_2_1)
rf_tfl_grw_mse_25_1, rf_tfl_grw_r2_25_1, rf_tfl_grw_prs_25_1 = get_rf_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 1], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_25_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_25_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_25_1)
rf_tfl_grw_mse_1_1, rf_tfl_grw_r2_1_1, rf_tfl_grw_prs_1_1 = get_rf_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 1], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_1_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_1_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_1_1)
rf_tfl_grw_mse_75_1, rf_tfl_grw_r2_75_1, rf_tfl_grw_prs_75_1 = get_rf_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 1], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_75_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_75_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_75_1)
rf_tfl_grw_mse_3_1, rf_tfl_grw_r2_3_1, rf_tfl_grw_prs_3_1 = get_rf_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 1], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_3_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_3_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_3_1)
rf_tfl_grw_1 = pd.DataFrame({'rf_tfl_grw_mse_1':rf_tfl_grw_mse_1[::-1], 'rf_tfl_grw_r2_1':rf_tfl_grw_r2_1[::-1], 'rf_tfl_grw_prs_1':rf_tfl_grw_prs_1[::-1]})
rf_tfl_grw_1.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
rf_tfl_grw_1.to_csv('rf_tfl_grw_1.csv')
print('RF of time to flowering and grain weight is saved')
print("Find mse and r-squared for svm model of grain weight...")
svr_grw_mse_2, svr_grw_r2_2, svr_grw_prs_2 = get_svm_model(X_grw_2_train, y_grw_2_train, X_grw_2_test, y_grw_2_test)
svr_grw_mse.append(svr_grw_mse_2)
svr_grw_r2.append(svr_grw_r2_2)
svr_grw_prs.append(svr_grw_prs_2)
svr_grw_mse_3, svr_grw_r2_3, svr_grw_prs_3 = get_svm_model(X_grw_3_train, y_grw_3_train, X_grw_3_test, y_grw_3_test)
svr_grw_mse.append(svr_grw_mse_3)
svr_grw_r2.append(svr_grw_r2_3)
svr_grw_prs.append(svr_grw_prs_3)
svr_grw_mse_4, svr_grw_r2_4, svr_grw_prs_4 = get_svm_model(X_grw_4_train, y_grw_4_train, X_grw_4_test, y_grw_4_test)
svr_grw_mse.append(svr_grw_mse_4)
svr_grw_r2.append(svr_grw_r2_4)
svr_grw_prs.append(svr_grw_prs_4)
svr_grw_mse_5, svr_grw_r2_5, svr_grw_prs_5 = get_svm_model(X_grw_5_train, y_grw_5_train, X_grw_5_test, y_grw_5_test)
svr_grw_mse.append(svr_grw_mse_5)
svr_grw_r2.append(svr_grw_r2_5)
svr_grw_prs.append(svr_grw_prs_5)
svr_grw = pd.DataFrame({'svr_grw_mse':svr_grw_mse[::-1], 'svr_grw_r2':svr_grw_r2[::-1], 'svr_grw_prs':svr_grw_prs[::-1]})
svr_grw.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
svr_grw.to_csv('svr_grw.csv')
print('SVR of grain weight is saved')
print("Find mse and r-squared for svm model of time to flowering...")
svr_tfl_mse_2, svr_tfl_r2_2, svr_tfl_prs_2 = get_svm_model(X_tfl_2_train, y_tfl_2_train, X_tfl_2_test, y_tfl_2_test)
svr_tfl_mse.append(svr_tfl_mse_2)
svr_tfl_r2.append(svr_tfl_r2_2)
svr_tfl_prs.append(svr_tfl_prs_2)
svr_tfl_mse_3, svr_tfl_r2_3, svr_tfl_prs_3 = get_svm_model(X_tfl_3_train, y_tfl_3_train, X_tfl_3_test, y_tfl_3_test)
svr_tfl_mse.append(svr_tfl_mse_3)
svr_tfl_r2.append(svr_tfl_r2_3)
svr_tfl_prs.append(svr_tfl_prs_3)
svr_tfl_mse_4, svr_tfl_r2_4, svr_tfl_prs_4 = get_svm_model(X_tfl_4_train, y_tfl_4_train, X_tfl_4_test, y_tfl_4_test)
svr_tfl_mse.append(svr_tfl_mse_4)
svr_tfl_r2.append(svr_tfl_r2_4)
svr_tfl_prs.append(svr_tfl_prs_4)
svr_tfl_mse_5, svr_tfl_r2_5, svr_tfl_prs_5 = get_svm_model(X_tfl_5_train, y_tfl_5_train, X_tfl_5_test, y_tfl_5_test)
svr_tfl_mse.append(svr_tfl_mse_5)
svr_tfl_r2.append(svr_tfl_r2_5)
svr_tfl_prs.append(svr_tfl_prs_5)
svr_tfl_mse_6, svr_tfl_r2_6, svr_tfl_prs_6 = get_svm_model(X_tfl_6_train, y_tfl_6_train, X_tfl_6_test, y_tfl_6_test)
svr_tfl_mse.append(svr_tfl_mse_6)
svr_tfl_r2.append(svr_tfl_r2_6)
svr_tfl_prs.append(svr_tfl_prs_6)
svr_tfl = | pd.DataFrame({'svr_tfl_mse':svr_tfl_mse[::-1], 'svr_tfl_r2':svr_tfl_r2[::-1], 'svr_tfl_prs':svr_tfl_prs[::-1]}) | pandas.DataFrame |
import numpy as np
import pandas as p
from datetime import datetime, timedelta
class PreprocessData():
def __init__(self, file_name):
self.file_name = file_name
#get only used feature parameters
def get_features(self, file_name):
data = p.read_csv(file_name, skiprows=7, sep=';', header=None)
data.drop(data.columns[len(data.columns)-1], axis=1, inplace=True)
data.columns = ['DateAndTime', 'T', 'Po', 'P', 'Pa', 'U', 'DD', 'Ff', 'ff10',
'ff3', 'N', 'WW', 'W1', 'W2', 'Tn', 'Tx', 'Cl', 'Nh', 'H', 'Cm', 'Ch',
'VV', 'Td', 'RRR', 'tR', 'E', 'Tg', 'E\'', 'sss']
        data[['Date', 'Time']] = data.DateAndTime.str.split(expand=True)
data.Date = self.removeYear(data)
return data[['Date', 'Time', 'T', 'Po', 'P', 'Pa', 'DD', 'Ff', 'N', 'Tn', 'Tx', 'VV', 'Td']]
    # preprocess data either for training the model or for generating data to run a prediction
def preprocess(self, training_flag, predict_date):
data = self.get_features(self.file_name)
data_date = p.get_dummies(data.Date.to_frame())
data_time = p.get_dummies(data.Time.to_frame())
wind_direction = p.get_dummies(data.DD.to_frame())
cloud_rate = p.get_dummies(data.N.to_frame())
data_target = data[['T']];
name = "features.csv"
if training_flag:
temp_data = data.Date.to_frame().apply(lambda x: p.Series(self.training(x, data_target)), axis=1)
result = p.concat([data_date, data_time, wind_direction, cloud_rate, temp_data], axis=1)
result.iloc[:len(result.index) - 365*8].to_csv("features.csv")
data_target.iloc[:len(data_target.index) - 365*8].to_csv("target.csv")
return "features.csv", "target.csv"
else:
temp_data = data.Date.to_frame().apply(lambda x: p.Series(self.predicting(x, data_target)), axis=1)
data_date = data_date.iloc[:8]
predict_date = datetime.strptime(predict_date, "%d.%m.%Y")
new_date_string = ("Date_%02d.%02d") % (predict_date.day, predict_date.month)
predict_date = predict_date - timedelta(days=1)
date_string = ("Date_%02d.%02d") % (predict_date.day, predict_date.month)
data_date[date_string] = 0
data_date[new_date_string] = 1
result = | p.concat([data_date, data_time, wind_direction, cloud_rate, temp_data], axis=1) | pandas.concat |
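# Editor's sketch (hedged): intended call pattern for the class above; it relies on helper
# methods (removeYear, training, predicting) that are not shown in this excerpt, and the
# file name below is illustrative.
# prep = PreprocessData('weather_history.csv')
# features_file, target_file = prep.preprocess(training_flag=True, predict_date=None)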
import os
import pandas as pd
import numpy as np
from kuzushiji_data import data_config
class KuzushijiF1:
@staticmethod
    def score_page(preds, truth, with_label=True):
"""
Scores a single page.
Args:
preds: prediction string of labels and center points.
truth: ground truth string of labels and bounding boxes.
Returns:
True/false positive and false negative counts for the page
"""
tp = 0
fp = 0
fn = 0
truth_indices = {
'label': 0,
'X': 1,
'Y': 2,
'Width': 3,
'Height': 4
}
preds_indices = {
'label': 0,
'X': 1,
'Y': 2
}
if pd.isna(truth) and pd.isna(preds):
return {'tp': tp, 'fp': fp, 'fn': fn}
if pd.isna(truth):
fp += len(preds.split(' ')) // len(preds_indices)
return {'tp': tp, 'fp': fp, 'fn': fn}
if | pd.isna(preds) | pandas.isna |
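# Editor's sketch (hedged): string layouts implied by the index maps in score_page above --
# predictions are space-separated (label, X, Y) triplets, ground truth adds Width and Height.
example_preds = "U+003F 1 1 U+0041 100 100"   # label, X, Y per detection
example_truth = "U+003F 0 0 10 10"            # label, X, Y, Width, Height per box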
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.base import BaseEstimator, RegressorMixin
from statsmodels.tsa.statespace.sarimax import SARIMAX
from fbprophet import Prophet
class SetTempAsPower:
"""
    Makes a baseline forecast by mapping temperature linearly onto the observed power range
    (hottest days get the highest power)
"""
def __init__(self, col="temp_max"):
self.col = col
def fit(self, X, y):
self.y_fit = y
self.X_fit = X
self.max_power = y.max()
self.min_power = y.min()
return self
def predict(self, X):
self.X_predict = X
minmaxscaler = MinMaxScaler(feature_range=(self.min_power, self.max_power))
minmaxscaler.fit(self.X_fit[[self.col]])
scaled = minmaxscaler.transform(self.X_predict[[self.col]])
self.predict_yhat = pd.Series(
data=scaled.reshape(1, -1)[0], index=self.X_predict.index
)
return self.predict_yhat
def get_pred_values(self):
# All Data is only available after fit and predict
# Build a return DataFrame that looks similar to the prophet output
# date index | y | yhat| yhat_lower | yhat_upper | is_forecast
X_fit = self.X_fit.copy()
X_predict = self.X_predict.copy()
y = self.y_fit
if self.X_fit.equals(self.X_predict):
yhat = self.predict_yhat.copy(deep=True)
else:
self.fit(X_fit, y)
y.name = "y"
fit_yhat = self.predict(X_fit)
pred_yhat = self.predict(X_predict)
y_pred = pd.Series(np.NaN, index=X_predict.index)
y = | pd.concat([y, y_pred], axis=0) | pandas.concat |
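# Editor's sketch: minimal use of the temperature-as-power baseline above on synthetic
# data (dates and values are illustrative, not from the original project).
def _editor_demo_set_temp_as_power():
    days = pd.date_range('2021-06-01', periods=5, freq='D')
    X = pd.DataFrame({'temp_max': [20.0, 25.0, 30.0, 35.0, 40.0]}, index=days)
    y = pd.Series([100.0, 120.0, 150.0, 180.0, 200.0], index=days)
    baseline = SetTempAsPower(col='temp_max').fit(X, y)
    return baseline.predict(X)  # hottest day maps to y.max() (200), coolest to y.min() (100)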
from factor_analyzer import FactorAnalyzer, Rotator, calculate_bartlett_sphericity, calculate_kmo
from sklearn.utils import check_array
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
def score(database, semester, year, season, answer_key, savedname):
'''
Modified so that it uses numerical values of question/answer rather than string values.
By:
<NAME>, 5 March 2018
'''
'''
The score function reads in a QuaRCS dataset and answer key file to create a series of columns
to add to the dataset. The function creates columns for:
- score on a binary scale (1 for correct, 0 for incorrect)
- total score
- totals and means by category
- number of questions answered
- total and mean confidence
Args:
database: pre or post QuaRCS dataset for a semester
answer_key: QuaRCS Assessment Answer Key
semester: 'PRE' or 'PST'
Output:
name of file + '_scored' as .csv file
Example:
score('QuaRCS_Summer_2017_Pre.csv', 'PRE', QuaRCS Assessment Answer Key.csv', QuaRCS_Summer_2017_Pre )
New File saved under QuaRCS_Summer_2017_Pre_scored.csv
Check folder for files
By:
<NAME>, 08/11/2017
Future Improvements:
add columns for confidence means and totals by category
        add extra columns after insert so the deletion of columns will not be necessary
'''
question = semester + "_Q" # question = 'PRE_Q' or 'PST_Q'
data = pd.read_csv(database, encoding = 'utf-8', skiprows = [1,2], header = 0)
df = pd.read_csv(answer_key, encoding = 'utf-8')
cols = list(data.columns.values)
c = len(cols)
e = 0
h = len(data)
# Adds the Q#_SCORE column right next to each question
questions = np.unique(df['Question #'])
for item in questions:
if(question+str(item) in data.columns):
data.insert(data.columns.get_loc(question+str(item))+1,question+str(item)+'_SCORE', 0)
# e >= 50 --> Full, e < 50 --> Lite
for d in range(c):
column = cols[d]
column = column[0:5]
if question == column:
e = e + 1
data.insert(6 , 'VERSION', " ")
if e == 50:
if(year == "16" and season == "Fa"):
data['VERSION'] = "Fl_2.0"
# If the value "progress bar" is in comments, change the version to 2.1
for v in range(h):
if 'COMMENTS' in data.columns:
if (data.loc[v, 'COMMENTS'] == "progress bar"):
data.loc[v, 'VERSION'] = "Fl_2.1"
else:
data['VERSION'] = "Fl_1.1"
elif e == 54:
data['VERSION'] = "Fl_1.0"
data = data.drop([semester + '_Q18'], axis=1)
data = data.drop([semester + '_Q18CF'], axis=1)
data = data.drop([semester + '_Q25'], axis=1)
data = data.drop([semester + '_Q25CF'], axis=1)
e = 50
elif e == 22:
data['VERSION'] = "Lt_1.0"
elif e == 30:
intyr = int(year)
if (intyr >= 19 or (year == "18" and season == "Fa")):
data['VERSION'] = "Lt_2.1"
else:
data['VERSION'] = "Lt_2.0"
elif e == 28:
data['VERSION'] = "SM_1.0"
# New columns for the totals
data[semester + '_TOTAL'] = np.nan
data[semester + '_PCT_TOTAL'] = np.nan
data[semester + '_GR_TOTAL'] = np.nan
data[semester + '_GR_MEAN'] = np.nan
data[semester + '_AR_TOTAL'] = np.nan
data[semester + '_AR_MEAN'] = np.nan
data[semester + '_PR_TOTAL'] = np.nan
data[semester + '_PR_MEAN'] = np.nan
data[semester + '_PC_TOTAL'] = np.nan
data[semester + '_PC_MEAN'] = np.nan
data[semester + '_SP_TOTAL'] = np.nan
data[semester + '_SP_MEAN'] = np.nan
data[semester + '_TR_TOTAL'] = np.nan
data[semester + '_TR_MEAN'] = np.nan
data[semester + '_AV_TOTAL'] = np.nan
data[semester + '_AV_MEAN'] = np.nan
#data[semester + '_ER_MEAN'] = np.nan
data[semester + '_UD_TOTAL'] = np.nan
data[semester + '_UD_MEAN'] = np.nan
data[semester + '_ES_TOTAL'] = np.nan
data[semester + '_ES_MEAN'] = np.nan
# Composite Variables
data[semester + '_SELFEFF'] = np.nan
data[semester + '_MATHANX'] = np.nan
data[semester + '_MATHREL'] = np.nan
data[semester + '_ACADMAT'] = np.nan
data[semester + '_SCHMATH'] = np.nan
corr_ans = {15: 0, 12:0, 14:0, 26:0, 27:0, 23:0, 28:0, 19:0, 3:0, 16:0, 13:0, 31:0,
32:0, 29:0, 30:0, 5:0, 6:0, 7:0, 10:0, 11:0, 20:0, 21:0, 33:0, 34:0, 35:0}
for item in corr_ans:
corr_ans[item] = int(list(df.loc[df['Question #']==item]['Correct Answer'])[0])
# Adds totals and means to total and means columns
for nn in range(h):
qn = {15: 0, 12:0, 14:0, 26:0, 27:0, 23:0, 28:0, 19:0, 3:0, 16:0, 13:0, 31:0, 32:0, 29:0, 30:0, 5:0, 6:0, 7:0, 10:0, 11:0, 20:0, 21:0, 33:0, 34:0, 35:0}
for q_num in qn:
try:
if(int(data.loc[nn, question + str(q_num)]) == corr_ans[q_num]):
qn[q_num] = 1
data.loc[nn, question+str(q_num)+'_SCORE'] = 1
except:
pass
GR = int(np.nansum([qn[15], qn[14], qn[12], qn[29], qn[30], qn[13]]))
AR = int(np.nansum([qn[15], qn[14], qn[26], qn[27], qn[23], qn[28], qn[19], qn[3], qn[16], qn[31], qn[32], qn[5], qn[6], qn[7], qn[29], qn[30], qn[10], qn[11], qn[20], qn[21], qn[33], qn[34], qn[35]]))
PR = int(np.nansum([qn[15], qn[12], qn[14], qn[23], qn[28], qn[3], qn[16], qn[7], qn[10], qn[11], qn[20], qn[21], qn[33], qn[35], qn[13]]))
PC = int(np.nansum([qn[27], qn[3], qn[32], qn[20], qn[21]]))
SP = int(np.nansum([qn[27], qn[23], qn[28], qn[29], qn[30], qn[20], qn[21]]))
TR = int(np.nansum([qn[26], qn[27], qn[23]]))
AV = int(np.nansum([qn[31], qn[10], qn[11], qn[33], qn[34]]))
UD = int(np.nansum([qn[31], qn[6], qn[7], qn[35], qn[16]]))
ES = int(np.nansum([qn[15], qn[12], qn[14], qn[16], qn[13]]))
data.loc[nn, semester + '_GR_TOTAL'] = GR
data.loc[nn, semester + '_AR_TOTAL'] = AR
data.loc[nn, semester + '_PR_TOTAL'] = PR
data.loc[nn, semester + '_PC_TOTAL'] = PC
data.loc[nn, semester + '_SP_TOTAL'] = SP
data.loc[nn, semester + '_TR_TOTAL'] = TR
data.loc[nn, semester + '_AV_TOTAL'] = AV
data.loc[nn, semester + '_UD_TOTAL'] = UD
data.loc[nn, semester + '_ES_TOTAL'] = ES
total_full = 0
for q_num in qn:
total_full += qn[q_num]
if e == 50:
data.loc[nn, semester + '_TOTAL'] = total_full
data.loc[nn, semester + '_PCT_TOTAL'] = total_full/(25)
data.loc[nn, semester + '_GR_MEAN'] = GR/6
data.loc[nn, semester + '_AR_MEAN'] = AR/23
data.loc[nn, semester + '_PR_MEAN'] = PR/15
data.loc[nn, semester + '_PC_MEAN'] = PC/5
data.loc[nn, semester + '_SP_MEAN'] = SP/7
data.loc[nn, semester + '_TR_MEAN'] = TR/3
data.loc[nn, semester + '_AV_MEAN'] = AV/5
data.loc[nn, semester + '_UD_MEAN'] = UD/5
data.loc[nn, semester + '_ES_MEAN'] = ES/5
elif e == 22:
data.loc[nn, semester + '_TOTAL'] = total_full
data.loc[nn, semester + '_PCT_TOTAL'] = total_full/(11)
data.loc[nn, semester + '_GR_MEAN'] = GR/4
data.loc[nn, semester + '_AR_MEAN'] = AR/9
data.loc[nn, semester + '_PR_MEAN'] = PR/8
data.loc[nn, semester + '_SP_MEAN'] = SP/3
data.loc[nn, semester + '_TR_MEAN'] = TR/3
data.loc[nn, semester + '_ES_MEAN'] = ES/5
#lacks number of questions for meaningful subscore
#1 q
data.loc[nn, semester + '_UD_MEAN'] = np.nan
data.loc[nn, semester + '_UD_TOTAL'] = np.nan
#2 qs
data.loc[nn, semester + '_PC_MEAN'] = np.nan
data.loc[nn, semester + '_PC_TOTAL'] = np.nan
#1 q
data.loc[nn, semester + '_AV_MEAN'] = np.nan
data.loc[nn, semester + '_AV_TOTAL'] = np.nan
elif e == 30:
data.loc[nn, semester + '_TOTAL'] = total_full
data.loc[nn, semester + '_PCT_TOTAL'] = total_full/(15)
data.loc[nn, semester + '_GR_MEAN'] = GR/4
data.loc[nn, semester + '_AR_MEAN'] = AR/13
data.loc[nn, semester + '_PR_MEAN'] = PR/11
data.loc[nn, semester + '_SP_MEAN'] = SP/3
data.loc[nn, semester + '_TR_MEAN'] = TR/3
data.loc[nn, semester + '_AV_MEAN'] = AV/4
data.loc[nn, semester + '_ES_MEAN'] = ES/5
#lacks number of questions for meaningful subscore
#1 q
data.loc[nn, semester + '_UD_MEAN'] = np.nan
data.loc[nn, semester + '_UD_TOTAL'] = np.nan
#2 qs
data.loc[nn, semester + '_PC_MEAN'] = np.nan
data.loc[nn, semester + '_PC_TOTAL'] = np.nan
elif e == 28:
data.loc[nn, semester + '_TOTAL'] = total_full
data.loc[nn, semester + '_PCT_TOTAL'] = total_full/(14)
data.loc[nn, semester + '_GR_MEAN'] = GR/4
data.loc[nn, semester + '_AR_MEAN'] = AR/13
data.loc[nn, semester + '_PR_MEAN'] = PR/9
data.loc[nn, semester + '_PC_MEAN'] = PC/3
data.loc[nn, semester + '_SP_MEAN'] = SP/7
data.loc[nn, semester + '_UD_MEAN'] = UD/5
data.loc[nn, semester + '_ES_MEAN'] = ES/3
#lacks number of questions for meaningful subscore
#2 q
data.loc[nn, semester + '_TR_MEAN'] = np.nan
data.loc[nn, semester + '_TR_TOTAL'] = np.nan
#1 q
data.loc[nn, semester + '_AV_MEAN'] = np.nan
data.loc[nn, semester + '_AV_TOTAL'] = np.nan
data[semester + '_CF_TOTAL'] = np.nan
data[semester + '_CF_TOTAL_CORR'] = np.nan
data[semester + '_CF_TOTAL_INCORR'] = np.nan
data[semester + '_CF_MEAN'] = np.nan
data[semester + '_CF_MEAN_CORR'] = np.nan
data[semester + '_CF_MEAN_INCORR'] = np.nan
# Calculates confidence totals and means; adds to respective columns
for u in range(h):
qcf = {'15': 0, '12':0, '14':0, '26':0, '27':0, '23':0, '28':0, '19':0, '3':0, '16':0, '13':0, '31':0, '32':0, '29':0, '30':0, '5':0, '6':0, '7':0, '10':0, '11':0,'20':0, '21':0, '33':0, '34':0, '35':0}
qc = {'15': 0, '12':0, '14':0, '26':0, '27':0, '23':0, '28':0, '19':0, '3':0, '16':0, '13':0, '31':0, '32':0, '29':0, '30':0, '5':0, '6':0, '7':0, '10':0, '11':0,'20':0, '21':0, '33':0, '34':0, '35':0}
for q_num in qcf:
try:
qcf[q_num] = int(data.loc[u, question + str(q_num) + "CF"])
qc[q_num] = int(data.loc[u, question + str(q_num) + '_SCORE'])
except:
pass
medscore = 0
corrscore = 0
incorrscore = 0
confcount = 0
for item in qcf:
medscore += qcf[item]
if qcf[item] > 0:
confcount +=1
if qc[item] == 1:
corrscore += qcf[item]
else:
incorrscore += qcf[item]
#print(confcount)
if (confcount == 0):
confcount = 1
# Student's score
numcorr = data.loc[u, semester + '_TOTAL']
# Calculate confidence scores
if e == 30:
data.loc[u, semester + '_CF_TOTAL'] = medscore
data.loc[u, semester + '_CF_TOTAL_CORR'] = corrscore
data.loc[u, semester + '_CF_TOTAL_INCORR'] = incorrscore
data.loc[u, semester + '_CF_MEAN'] = medscore/confcount
if numcorr != 0:
data.loc[u, semester + '_CF_MEAN_CORR'] = corrscore/numcorr
else:
data.loc[u, semester + '_CF_MEAN_CORR'] = 0
if numcorr != confcount:
data.loc[u, semester + '_CF_MEAN_INCORR'] = incorrscore/(confcount-numcorr)
else:
data.loc[u, semester + '_CF_MEAN_INCORR'] = 0
elif e == 22:
data.loc[u, semester + '_CF_TOTAL'] = medscore
data.loc[u, semester + '_CF_TOTAL_CORR'] = np.nan
data.loc[u, semester + '_CF_TOTAL_INCORR'] = incorrscore
data.loc[u, semester + '_CF_MEAN'] = medscore/confcount
if numcorr != 0:
data.loc[u, semester + '_CF_MEAN_CORR'] = corrscore/numcorr
else:
data.loc[u, semester + '_CF_MEAN_CORR'] = 0
if numcorr != confcount:
data.loc[u, semester + '_CF_MEAN_INCORR'] = incorrscore/(confcount-numcorr)
else:
data.loc[u, semester + '_CF_MEAN_INCORR'] = 0
elif e == 28:
data.loc[u, semester + '_CF_TOTAL'] = medscore
data.loc[u, semester + '_CF_TOTAL_CORR'] = np.nan
data.loc[u, semester + '_CF_TOTAL_INCORR'] = incorrscore
data.loc[u, semester + '_CF_MEAN'] = medscore/confcount
if numcorr != 0:
data.loc[u, semester + '_CF_MEAN_CORR'] = corrscore/numcorr
else:
data.loc[u, semester + '_CF_MEAN_CORR'] = 0
if numcorr != confcount:
data.loc[u, semester + '_CF_MEAN_INCORR'] = incorrscore/(confcount-numcorr)
else:
data.loc[u, semester + '_CF_MEAN_INCORR'] = 0
elif e == 50:
data.loc[u, semester + '_CF_TOTAL'] = medscore
data.loc[u, semester + '_CF_TOTAL_CORR'] = corrscore
data.loc[u, semester + '_CF_TOTAL_INCORR'] = incorrscore
data.loc[u, semester + '_CF_MEAN'] = medscore/confcount
if numcorr != 0:
data.loc[u, semester + '_CF_MEAN_CORR'] = corrscore/numcorr
else:
data.loc[u, semester + '_CF_MEAN_CORR'] = 0
if numcorr != confcount:
data.loc[u, semester + '_CF_MEAN_INCORR'] = incorrscore/(confcount-numcorr)
else:
data.loc[u, semester + '_CF_MEAN_INCORR'] = 0
data[semester + '_QCOMPLETE'] = 0
data[semester + '_COMPFLAG'] = 0
data[semester +'_EFFFLAG'] = 0
# Counts number of completed columns
try:
if e == 50:
q = [15, 12, 14, 26, 27, 23, 28, 19, 3, 16, 13, 31, 32, 29, 30, 5, 6, 7, 10, 11, 20, 21, 33, 34, 35]
elif e == 22:
q = [15, 12, 13, 14, 26, 27, 23, 28, 19, 3, 16]
elif e == 30:
q = [15, 12, 13, 14, 26, 27, 23, 28, 19, 3, 16, 10, 11, 33, 34]
elif e == 28:
q = [6, 7, 13, 14, 16, 20, 21, 23, 27, 28, 29, 30, 31, 35]
for v in range(h):
# Count up totals
total = 0
for w in q:
count = question + str(w)
answered = data.loc[v, count]
if (str(answered) == 'nan' or str(answered) == ' '):
continue
else:
total = int(np.nansum([total, 1]))
data.loc[v, semester + '_QCOMPLETE'] = total
# Add completed flag
if total == len(q):
data.loc[v, semester + '_COMPFLAG'] = 1
else:
data.loc[v, semester + '_COMPFLAG'] = 0
    except KeyError:
        pass
# Calculating effort column
for v in range(h):
# If there is no response for effort, mark completion as 0 for that student!
if ( | pd.isnull(data.loc[v, semester + '_EFFORT']) | pandas.isnull |
import pandas as pd
import numpy as np
import re
import datetime as dt
import math
import geopandas as gpd
import h3 # h3 bins from uber
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, minmax_scale, MinMaxScaler
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import NearestCentroid
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import scipy.cluster.hierarchy as sch
import holidays
from fastai.vision.all import * # Needs latest version, and sometimes a restart of the runtime after the pip installs
from sklearn_extra.cluster import KMedoids
import json
from geopy.distance import geodesic
def clean_weather_data(df_weather):
'''
Fills the missing information by looking at the previous and the following existing values
    and then incrementally distributing the difference over the missing days.
This guarantees a smooth development of the weather data over time.
'''
    missing = df_weather[pd.isnull(df_weather).any(axis=1)].index
if len(missing) > 0:
for col in df_weather.columns[1:]:
before = df_weather.loc[missing[0]-1, col]
after = df_weather.loc[missing[-1]+1, col]
diff = after - before
for i in range(len(missing)):
df_weather.loc[missing[i], col] = before+diff/(len(missing)+1)*(i+1)
return df_weather
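def _editor_demo_clean_weather_data():
    # Editor's sketch (not part of the original module): worked example of the fill rule
    # above on a single contiguous gap; the column name mirrors the real weather frame.
    w = pd.DataFrame({'Date': pd.date_range('2019-01-01', periods=4, freq='D'),
                      'temperature_2m_above_ground': [10.0, np.nan, np.nan, 16.0]})
    # before=10.0, after=16.0, diff=6.0 -> the two gaps become 10 + 6/3*1 = 12.0 and
    # 10 + 6/3*2 = 14.0, i.e. a straight ramp between the neighbouring observations
    return clean_weather_data(w)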
def add_weather_change(df_weather):
df_weather["change_water_atmosphere"] = 0
df_weather["change_temperature"] = 0
for row in range(df_weather.shape[0]):
if row == 0:
df_weather.loc[row, "change_water_atmosphere"] = 0
df_weather.loc[row, "change_temperature"] = 0
else:
df_weather.loc[row, "change_water_atmosphere"] = df_weather.loc[row, "precipitable_water_entire_atmosphere"] - df_weather.loc[row-1, "precipitable_water_entire_atmosphere"]
df_weather.loc[row, "change_temperature"] = df_weather.loc[row, "temperature_2m_above_ground"] - df_weather.loc[row-1, "temperature_2m_above_ground"]
return df_weather
def join_accident_to_weather(df_accident, df_weather):
'''
Left-joins the accident data to the weather data, resulting in a dataframe containing the weather information
for every day as well as the aggregated accidents.
'''
# Count accidents per day and leftjoin to weather dataframe
df_accident["date"] = df_accident["datetime"].apply(lambda x: x.date())
if type(df_weather.loc[0, "Date"]) is not dt.date:
df_weather["Date"] = df_weather["Date"].apply(lambda x: x.date())
accident_count = df_accident.groupby("date").count()["uid"].reset_index()
df_combined = df_weather.merge(accident_count[["date", "uid"]], left_on="Date", right_on="date", how='left')
# Fill NaNs with zeros
df_combined.fillna(value=0, inplace=True)
# Drop duplicate Date column
df_combined.drop("date", axis=1, inplace=True)
# Rename column
df_combined.rename(columns={"Date":"date", "uid":"accidents"}, inplace=True)
# Adding column with 1 for sundays and holidays, 0 for working days
df_combined["sun_holiday"] = df_combined["date"].apply(lambda x: 1 if (x.weekday() == 6) or (x in holidays.Kenya()) else 0)
# Change type to integer
df_combined["accidents"] = df_combined["accidents"].astype("int")
return df_combined
def scale_pca_weather(df_combined):
'''
Scaling and analysing the principal components of the weather data.
'''
# Scaling
mm_scaler = MinMaxScaler()
X_mm = df_combined[["precipitable_water_entire_atmosphere", "relative_humidity_2m_above_ground",
"specific_humidity_2m_above_ground", "temperature_2m_above_ground"]]
X_mm_scaled = mm_scaler.fit_transform(X_mm)
std_scaler = StandardScaler()
X_std = df_combined[["u_component_of_wind_10m_above_ground", "v_component_of_wind_10m_above_ground",
"change_water_atmosphere", "change_temperature"]]
X_std_scaled = std_scaler.fit_transform(X_std)
X_scaled = pd.DataFrame(np.concatenate((X_mm_scaled, X_std_scaled), axis=1), columns=["precipitable_water_entire_atmosphere", "relative_humidity_2m_above_ground",
"specific_humidity_2m_above_ground", "temperature_2m_above_ground", "u_component_of_wind_10m_above_ground", "v_component_of_wind_10m_above_ground",
"change_water_atmosphere", "change_temperature"])
# Principal componant analysis (PCA)
pca = PCA(n_components=0.99)
pca_decomposition = pca.fit(X_scaled)
X_pca = pca_decomposition.transform(X_scaled)
df_combined_pca = pd.DataFrame(X_pca)
df_combined_pca = df_combined_pca.join(df_combined[["date", "accidents", "sun_holiday"]])
return df_combined_pca
def split_combined(df_combined_pca, predict_period='2019_h2'):
if predict_period == '2019_h1':
X_train = df_combined_pca[df_combined_pca["date"] < dt.date(2019, 1, 1)][[0, 1, 2, 3, 4, "sun_holiday"]]
y_train = df_combined_pca[df_combined_pca["date"] < dt.date(2019, 1, 1)]["accidents"]
X_test = df_combined_pca[(df_combined_pca["date"] >= dt.date(2019, 1, 1)) & (df_combined_pca["date"] < dt.date(2019, 7, 1))][[0, 1, 2, 3, 4, "sun_holiday"]]
elif predict_period == '2019_h2':
X_train = df_combined_pca[df_combined_pca["date"] < dt.date(2019, 7, 1)][[0, 1, 2, 3, 4, "sun_holiday"]]
y_train = df_combined_pca[df_combined_pca["date"] < dt.date(2019, 7, 1)]["accidents"]
X_test = df_combined_pca[(df_combined_pca["date"] >= dt.date(2019, 7, 1)) & (df_combined_pca["date"] < dt.date(2020, 1, 1))][[0, 1, 2, 3, 4, "sun_holiday"]]
return X_train, X_test, y_train
def predict_poly(X_train, X_test, y_train):
poly = PolynomialFeatures(degree=4)
X_train_poly = poly.fit_transform(X_train.drop("sun_holiday", axis=1))
lin_poly = LinearRegression()
lin_poly.fit(X_train_poly, y_train)
X_test_poly = poly.transform(X_test.drop("sun_holiday", axis=1))
return lin_poly.predict(X_test_poly)
def predict_accidents_on_weather(df_accident, df_weather, predict_period='2019_h1'):
'''
Takes the raw data and returns the number of predicted road traffic accidents for every day in the predict period:
First half 2019 : (predict_period='2019_h1')
Second half of 2019 : (predict_period='2019_h2')
'''
df_weather = clean_weather_data(df_weather)
df_weather = add_weather_change(df_weather)
df_combined = join_accident_to_weather(df_accident, df_weather)
df_combined_pca = scale_pca_weather(df_combined)
X_train, X_test, y_train = split_combined(df_combined_pca, predict_period=predict_period)
y_pred = predict_poly(X_train, X_test, y_train)
y_pred = [0 if i < 0 else i for i in y_pred]
return y_pred
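# Editor's sketch (hedged -- the weather file name is illustrative): end-to-end call of the
# pipeline above, using the loader defined just below.
# df_accident = create_crash_df('../Inputs/Train.csv')
# df_weather = pd.read_csv('../Inputs/Weather.csv', parse_dates=['Date'])
# daily_pred = predict_accidents_on_weather(df_accident, df_weather, predict_period='2019_h2')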
def create_crash_df(train_file = '../Inputs/Train.csv'):
'''
loads crash data from input folder into dataframe
'''
crash_df = pd.read_csv(train_file, parse_dates=['datetime'])
return crash_df
def create_temporal_features(df, date_column='datetime'):
'''
Add the set of temporal features the the df based on the datetime column. Returns the dataframe.
'''
dict_windows = {1: "00-03", 2: "03-06", 3: "06-09", 4: "09-12", 5: "12-15",
6: "15-18", 7: "18-21", 8: "21-24"}
dict_months = {1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun",
7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec"}
rainy_season = ["Mar", "Apr", "May", "Oct", "Nov", "Dec"]
df["time"] = df[date_column].apply(lambda x: x.time())
df["time_window"] = df[date_column].apply(lambda x: math.floor(x.hour / 3) + 1)
df["time_window_str"] = df["time_window"].apply(lambda x: dict_windows.get(x))
df["day"] = df[date_column].apply(lambda x: x.day)
df["weekday"] = df[date_column].apply(lambda x: x.weekday())
df["month"] = df[date_column].apply(lambda x: dict_months.get(x.month))
df["half_year"] = df[date_column].apply(lambda x: 1 if x.month<7 else 2)
df["rainy_season"] = df["month"].apply(lambda x: 1 if (x in rainy_season) else 0)
df["year"] = df[date_column].apply(lambda x: x.year)
df["date_trunc"] = df[date_column].apply(lambda x: x.date()) #this does something strange that breaks the code if higher
df["holiday"] = df["date_trunc"].apply(lambda x: 1 if (x in holidays.Kenya()) else 0)
df["weekday"] = df["date_trunc"].apply(lambda x: 7 if (x in holidays.Kenya()) else x.weekday())
return df
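def _editor_demo_create_temporal_features():
    # Editor's sketch: the derived columns above for a single timestamp (value illustrative).
    df = pd.DataFrame({'datetime': pd.to_datetime(['2018-12-25 14:30:00'])})
    df = create_temporal_features(df)
    # e.g. time_window == 5 ('12-15'), month == 'Dec', rainy_season == 1, half_year == 2,
    # and holiday == 1 with weekday forced to 7, since Christmas Day is in holidays.Kenya()
    return df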
def drop_temporal(df):
'''
helper function to remove all the granular temporal columns once they have been used for generating other columns for joining.
'''
df = df.drop(["day", "time_window", "time_window_str", "time_window_str", "month", "year", "weekday", "rainy_season", "date_trunc", "time", "half_year", "holiday"], axis=1)
return df
def split_accident_df(data, strategy, test_size=0.3, random_state=42):
'''
Splits the data set into a train and a test set.
strategy:
random = splits off random indices, using test_size and random_state parameters
year_2019 = splits the days of 2019 off into a test set
percentage_month = splits off the last days of every month to the test set according to the test_size
2nd_half_2018 = oversamples the months from July to December 2018 by about 33%
'''
if strategy == "random":
data = data.sample(frac=1, random_state=random_state).reset_index().drop("index", axis=1)
split_at = round(data.shape[0] * test_size)
data_train = data.iloc[split_at:, :]
data_test = data.iloc[:split_at, :]
elif strategy == "year_2019":
data_train = data[data["datetime"] < "2019-01-01"]
data_test = data[data["datetime"] >= "2019-01-01"]
elif strategy == "percentage_month":
split_at = round(30 * (1-test_size))
data_train = data.loc[data["day"] <= split_at]
data_test = data.loc[data["day"] > split_at]
elif strategy == "2nd_half_2018":
train_samples = round(data.shape[0] * (1-test_size))
test_samples = round(data.shape[0] * test_size)
data_train = data.sample(n=train_samples, weights="half_year", random_state=random_state)
data_test = data.sample(n=test_samples, weights="half_year", random_state=random_state)
return data_train, data_test
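
# Minimal sketch of the splitting strategies above (illustrative only). "percentage_month"
# and "2nd_half_2018" rely on the temporal features, so create_temporal_features must have
# been applied to crash_df first.
# crash_df = create_temporal_features(create_crash_df())
# train_df, holdout_df = split_accident_df(crash_df, strategy="year_2019")
# train_df, holdout_df = split_accident_df(crash_df, strategy="percentage_month", test_size=0.2)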
def outlier_removal(crash_df, filter=0.00):
if filter == 'hex_bin':
crash_df = assign_hex_bin(crash_df)
hex_bin_filter = ['867a45067ffffff', '867a45077ffffff', '867a4511fffffff',
'867a4512fffffff', '867a45147ffffff', '867a4515fffffff',
'867a45177ffffff', '867a45817ffffff', '867a4584fffffff',
'867a4585fffffff', '867a458dfffffff', '867a458f7ffffff',
'867a45a8fffffff', '867a45b0fffffff', '867a45b17ffffff',
'867a45b67ffffff', '867a45b77ffffff', '867a6141fffffff',
'867a614d7ffffff', '867a616b7ffffff', '867a6304fffffff',
'867a632a7ffffff', '867a63307ffffff', '867a6331fffffff',
'867a6360fffffff', '867a63667ffffff', '867a6396fffffff',
'867a656c7ffffff', '867a65797ffffff', '867a6e18fffffff',
'867a6e1b7ffffff', '867a6e4c7ffffff', '867a6e517ffffff',
'867a6e59fffffff', '867a6e5a7ffffff', '867a6e5b7ffffff',
'867a6e657ffffff', '867a6e737ffffff', '867a6e797ffffff',
'867a6e79fffffff', '867a6e7b7ffffff', '867a6ecf7ffffff',
'867a6ed47ffffff', '867a6ed97ffffff', '867a6eda7ffffff' ]
crash_df = crash_df.loc[~crash_df['h3_zone_6'].isin(hex_bin_filter)]
else:
'''filters top and bottom quantiles of data based on filter input'''
crash_df = crash_df.loc[crash_df['latitude'] < crash_df['latitude'].quantile(1-filter)]
crash_df = crash_df.loc[crash_df['latitude'] > crash_df['latitude'].quantile(filter)]
crash_df = crash_df.loc[crash_df['longitude'] < crash_df['longitude'].quantile(1-filter)]
crash_df = crash_df.loc[crash_df['longitude'] > crash_df['longitude'].quantile(filter)]
return crash_df
def assign_hex_bin(df,lat_column="latitude",lon_column="longitude", hexbin_resolution=6):
'''
Takes lat/lon and creates a column with the h3 bin name at the given resolution (h3_zone_6 by default).
'''
df["h3_zone_{}".format(hexbin_resolution)] = df.apply(lambda x: h3.geo_to_h3(x[lat_column], x[lon_column], hexbin_resolution),axis=1)
return df
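
# Quick sketch of assign_hex_bin on a toy frame (the coordinate is an arbitrary point in
# central Nairobi, used purely for illustration).
def _example_hex_bin():
    toy = pd.DataFrame({"latitude": [-1.2864], "longitude": [36.8172]})
    toy = assign_hex_bin(toy)  # adds the h3_zone_6 column
    return toy["h3_zone_6"].iloc[0]  # an H3 index string such as '867a6e...ffff'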
def plot_centroids(crash_data_df, centroids, cluster='cluster', labels = 'b'):
'''
plots the crash data points from crash_data_df and overlays the ambulance location from centroids.
Can be used in a loop by giving 'cluster' value as a parameter to label the chart with the cluster name.
'''
fig, axs = plt.subplots(figsize=(8, 5))
plt.scatter(x = crash_data_df['longitude'], y=crash_data_df['latitude'], s=1, label='Crash locations', c=labels )
plt.scatter(x = centroids[:,1], y=centroids[:,0], marker="x",
color='r',label='Ambulances locations',s=100)
axs.set_title('Scatter plot : Ambulances locations vs Crash locations :'+cluster)
plt.xlabel("longitude")
plt.ylabel("latitude")
plt.legend()
plt.show()
def plot_dendrogram(df):
'''Use Dendrogram to determine an optimal number of clusters'''
plt.figure(figsize=(45,18))
plt.title('Dendrogram')
plt.xlabel('time_buckets_days')
plt.ylabel('Euclidean distances')
dendrogram = sch.dendrogram(sch.linkage(df, method = 'ward'))
plt.show()
def calculate_TW_cluster(crash_df, method='MeanShift', verbose=0):
'''
Takes crash dataframe with temporal features added as input
Function to perform clustering of time windows and assign labels back to crash dataframe.
Output is dataframe with additional column for labels
If verbosity is increased, information about the clusters is printed.
'''
group_stats = crash_df.groupby(['time_window_str', 'weekday'])
group_stats = group_stats.agg({'latitude': [np.mean, np.std],'longitude': [np.mean, np.std, 'count']})
# flatten out groupby object and name columns again
group_stats = group_stats.reset_index()
group_stats.columns = group_stats.columns.get_level_values(0)
group_stats.columns.values[[2,3,4,5,6]] = ['latitude_mean', 'latitude_std',
'longitude_mean', 'longitude_std', 'RTA_count']
X = group_stats.loc[:,['RTA_count']]#, 'latitude_mean', 'latitude_std','longitude_mean', 'longitude_std']]
scaler = StandardScaler()
scale_columns = ['latitude_mean', 'latitude_std','longitude_mean', 'longitude_std']
#X[scale_columns] = scaler.fit_transform(X[scale_columns])
if verbose > 5:
X1 = X.copy()
X1['RTA_count'] = minmax_scale(X1['RTA_count'])
plot_dendrogram(X1)
if method == 'MeanShift':
#X['RTA_count'] = minmax_scale(X['RTA_count'])
ms_model = MeanShift().fit(X)
labels = ms_model.labels_
elif method == 'GMM':
X['RTA_count'] = minmax_scale(X['RTA_count'])
gmm = GaussianMixture(n_components=4, verbose=verbose, random_state=42)
gmm.fit(X)
labels = gmm.predict(X)
else:
display('Select method "MeanShift" or "GMM"')
#return 'error'
labels = pd.DataFrame(labels,columns=['cluster'])
clustered_time_buckets = pd.concat([group_stats,labels], axis=1)
if verbose > 0:
display(clustered_time_buckets.groupby('cluster').agg({'RTA_count': ['count', np.sum]}))
if verbose > 1:
plot_TW_cluster(clustered_time_buckets)
crash_df = crash_df.merge(clustered_time_buckets[['time_window_str', 'weekday','cluster']],
how='left', on=['time_window_str', 'weekday'])
return crash_df
def plot_TW_cluster(clustered_time_buckets):
'''
Displays stripplot to show how different times of the week are assigned to TW clusters.
'''
tb_clusters = sns.FacetGrid(clustered_time_buckets,hue='cluster', height=5)
tb_clusters.map(sns.stripplot,'weekday', 'time_window_str', s=25, order = ['00-03', '03-06', '06-09', '09-12',
'12-15', '15-18', '18-21', '21-24'])
def assign_TW_cluster(weekday, time_window, holiday=0, strategy='baseline'):
'''
Can be used in a lambda function to return the time window cluster for a given day and time window.
e.g. crash_df["cluster"] = crash_df.apply(lambda x: assign_TW_cluster(x.weekday, x.time_window_str), axis=1)
This is called by the function: create_cluster_feature.
'''
# baseline returns a single value for all time windows so there will only be a single placement set
if strategy == 'baseline':
return 'baseline'
# mean_shift_modified uses the results of the mean shift clustering
# and applies human approach to create 3 simple clusters
if strategy == 'mean_shift_modified':
if weekday == 7:
return 'off_peak'
elif weekday == 6:
return 'off_peak'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'peak'
# saturday_2 adds an additional cluster for middle of the day saturday
elif strategy == 'saturday_2':
if weekday == 7:
return 'off_peak'
elif weekday == 6:
return 'off_peak'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'saturday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'saturday_busy'
# holiday_6 builds on saturday_2 and adds a new 'day' to the week for holidays
# and a separate cluster for sundays. Total of 6 clusters
elif strategy == 'holiday_6':
if weekday == 7:
return 'holiday'
elif weekday == 6:
return 'sunday'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'saturday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'saturday_busy'
# has holidays but uses off peak for holidays and sundays
elif strategy == 'holiday_simple':
if weekday == 7:
return 'off_peak_day'
elif weekday == 6:
return 'off_peak_day'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'saturday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'saturday_busy'
# off_peak_split treats Sundays and holidays as busy during the day and off-peak at night
elif strategy == 'off_peak_split':
if weekday == 7:
if time_window in ["06-09", "09-12", "12-15", "15-18", "18-21"]:
return 'sunday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 6:
if time_window in ["06-09", "09-12", "12-15", "15-18", "18-21"]:
return 'sunday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'saturday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'saturday_busy'
# Weekend_day is an attempt to improve based on the loss scores of the model
elif strategy == 'weekend_day':
if weekday == 7:
if time_window in ["06-09", "09-12", "12-15", "15-18", "18-21"]:
return 'weekend_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 6:
if time_window in ["06-09", "09-12", "12-15", "15-18", "18-21"]:
return 'weekend_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'weekend_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'weekend_busy'
# no_cluster returns an individual cluster name for each weekday, time window and holiday combination
elif strategy == 'no_cluster':
return (str(weekday)+str(time_window)+str(holiday))
elif strategy == 'time_window':
return time_window
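
# Worked examples of assign_TW_cluster; the return values follow directly from the rules above:
# assign_TW_cluster(weekday=2, time_window="06-09", strategy="mean_shift_modified")  -> 'peak'
# assign_TW_cluster(weekday=5, time_window="09-12", strategy="saturday_2")           -> 'saturday_busy'
# assign_TW_cluster(weekday=7, time_window="12-15", strategy="holiday_6")            -> 'holiday'
# assign_TW_cluster(weekday=3, time_window="21-24", strategy="no_cluster")           -> '321-240'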
def create_cluster_feature(crash_df, strategy='baseline', verbose=0):
'''
Function takes crash df and creates new column with tw cluster labels.
If verbose is increased, the time window clusters will be visualised.
'''
crash_df['cluster'] = crash_df.apply(lambda x: assign_TW_cluster(weekday=x.weekday, time_window=x.time_window_str, strategy=strategy) ,axis=1)
if verbose > 0:
print(f'{crash_df.cluster.nunique()} clusters created')
if verbose > 1:
tb_clusters = sns.FacetGrid(crash_df,hue='cluster', height=5)
tb_clusters.map(sns.stripplot,'weekday', 'time_window_str', s=20,
order = ['00-03', '03-06', '06-09', '09-12',
'12-15', '15-18', '18-21', '21-24'],
label = 'Time Window Clusters')
return crash_df
def create_cluster_centroids(crash_df_with_cluster, test_df, verbose=0, method='k_means', lr=3e-2, n_epochs=400, batch_size=50):
if method == 'k_means':
centroids_dict = create_k_means_centroids(crash_df_with_cluster, verbose=verbose)
elif method == 'agglomerative':
centroids_dict = create_AgglomerativeClustering_centroids(crash_df_with_cluster, verbose=verbose)
elif method == 'gradient_descent':
centroids_dict = create_gradient_descent_centroids(crash_df_with_cluster, test_df, verbose=verbose,
lr=lr, n_epochs=n_epochs, batch_size=batch_size)
elif method == 'k_medoids':
centroids_dict = create_k_medoids_centroids(crash_df_with_cluster, verbose=verbose)
elif method == 'baseline':
centroids_dict = create_baseline_centroids(crash_df_with_cluster, verbose=verbose)
if verbose > 0:
print(f'{len(centroids_dict)} placement sets created')
return centroids_dict
def create_baseline_centroids(crash_df_with_cluster, verbose=0):
if verbose > 0:
print('using star grid for placement')
centroids_dict = {}
for i in crash_df_with_cluster.cluster.unique():
data_slice = crash_df_with_cluster.query('cluster==@i')
lat_centroid = list(data_slice.latitude.quantile(q=[1/5,2/5,3/5,4/5]))
lon_centroid = list(data_slice.longitude.quantile(q=[1/4,2/4,3/4]))
centroids=[(lat_centroid[1],lon_centroid[0]),(lat_centroid[2],lon_centroid[0]),
(lat_centroid[0],lon_centroid[1]),(lat_centroid[3],lon_centroid[1]),
(lat_centroid[1],lon_centroid[2]),(lat_centroid[2],lon_centroid[2])]
centroids_dict[i] = np.array(centroids).flatten()
if verbose > 5:
print(centroids)
return centroids_dict
def create_k_means_centroids(crash_df_with_cluster, verbose=0):
if verbose > 0:
print('using k-means clustering')
centroids_dict = {}
for i in crash_df_with_cluster.cluster.unique():
data_slice = crash_df_with_cluster.query('cluster==@i')
kmeans = KMeans(n_clusters=6, verbose=0, tol=1e-5, max_iter=500, n_init=20 ,random_state=42)
kmeans.fit(data_slice[['latitude','longitude']])
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
centroids_dict[i] = centroids.flatten()
if verbose > 2:
plot_centroids(data_slice, centroids, cluster=i, labels=labels)
if verbose > 5:
print(centroids)
return centroids_dict
def create_k_medoids_centroids(crash_df_with_cluster, verbose=0):
if verbose > 0:
print('using k-medoids clustering')
centroids_dict = {}
for i in crash_df_with_cluster.cluster.unique():
data_slice = crash_df_with_cluster.query('cluster==@i')
kmedoids = KMedoids(n_clusters=6, init='k-medoids++', max_iter=500, random_state=42)
kmedoids.fit(data_slice[['latitude','longitude']])
labels = kmedoids.labels_
centroids = kmedoids.cluster_centers_
centroids_dict[i] = centroids.flatten()
if verbose > 2:
plot_centroids(data_slice, centroids, cluster=i, labels=labels)
if verbose > 5:
print(centroids)
return centroids_dict
def create_AgglomerativeClustering_centroids(crash_df_with_cluster, verbose=0):
if verbose > 0:
print('using agglomerative clustering')
centroids_dict = {}
for i in crash_df_with_cluster.cluster.unique():
data_slice = crash_df_with_cluster.query('cluster==@i')
hc = AgglomerativeClustering(n_clusters = 6, affinity = 'euclidean', linkage = 'ward')
y_predict = hc.fit_predict(data_slice[['latitude','longitude']])
clf = NearestCentroid()
clf.fit(data_slice[['latitude','longitude']], y_predict)
centroids = clf.centroids_
centroids_dict[i] = centroids.flatten()
if verbose > 5:
plot_centroids(data_slice, centroids, cluster=i)
if verbose > 14:
print(centroids)
return centroids_dict
def create_gradient_descent_centroids(crash_df_with_cluster, test_df, verbose=0, lr=3e-3, n_epochs=400, batch_size=50):
if verbose > 0:
print('using gradient descent clustering')
# Copy dataframes and standardize lat lon values on train and test set
scaler = StandardScaler()
crash_df_scaled = crash_df_with_cluster.copy()
test_df_scaled = test_df.copy()
crash_df_scaled[['latitude', 'longitude']] = scaler.fit_transform(crash_df_scaled[['latitude', 'longitude']])
test_df_scaled[['latitude', 'longitude']] = scaler.transform(test_df_scaled[['latitude', 'longitude']])
centroids_dict = {}
for i in crash_df_scaled.cluster.unique():
data_slice = crash_df_scaled.query('cluster==@i')
test_slice = test_df_scaled.query('cluster==@i')
train_locs = tensor(data_slice[['latitude', 'longitude']].values) # To Tensor
val_locs = tensor(test_slice[['latitude', 'longitude']].values) # To Tensor
# Load crash locs from train into a dataloader
batches = DataLoader(train_locs, batch_size=batch_size, shuffle=True)
# Set up ambulance locations
amb_locs = torch.randn(6, 2)*.5
amb_locs = amb_locs + tensor(data_slice.latitude.mean(), data_slice.longitude.mean())
amb_locs.requires_grad_()
# Set vars
lr=lr
n_epochs = n_epochs
# Store loss over time
train_losses = []
val_losses = []
# Training loop
for epoch in range(n_epochs):
# Run through batches
for crashes in batches:
loss = loss_fn(crashes, amb_locs) # Find loss for this batch of crashes
loss.backward() # Calc grads
amb_locs.data -= lr * amb_locs.grad.data # Update locs
amb_locs.grad = None # Reset gradients for next step
train_losses.append(loss.item())
if verbose > 2:
val_loss = loss_fn(val_locs, amb_locs)
val_losses.append(val_loss.item()) # Can remove as this slows things down
if verbose > 2 and epoch % 100 == 0: # show progress
print(f'Val loss for cluster {i}: {val_loss.item()}')
centroids = amb_locs.detach().numpy()
#show output
if verbose > 5:
plot_centroids(data_slice, centroids, cluster=i)
if verbose > 14:
print(centroids)
if verbose > 9:
plt.figure(num=None, figsize=(16, 10), dpi=80, facecolor='w', edgecolor='k')
plt.plot(train_losses, label='train_loss')
plt.plot(val_losses, c='red', label='val loss')
plt.legend()
#scale back to lat lon
centroids = scaler.inverse_transform(centroids)
centroids_dict[i] = centroids.flatten()
return centroids_dict
def loss_fn(crash_locs, amb_locs):
"""
Used for gradient descent model.
For each crash we find the dist to the closest ambulance, and return the mean of these dists.
"""
# Dists to first ambulance
dists_split = crash_locs - amb_locs[0]
dists = (dists_split[:,0]**2 + dists_split[:,1]**2)**0.5
min_dists = dists
for i in range(1, 6):
# Update dists so they represent the dist to the closest ambulance
dists_split = crash_locs-amb_locs[i]
dists = (dists_split[:,0]**2 + dists_split[:,1]**2)**0.5
min_dists = torch.min(min_dists, dists)
return min_dists.mean()
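
# Tiny numeric check of loss_fn (illustration only): two crashes and six ambulances, five of
# them at the origin and one at (1, 1). Each crash then has an ambulance directly on top of
# it, so the mean minimum distance is 0. Uses the same tensor/torch helpers as the code above.
def _example_loss_fn():
    crashes = tensor([[0.0, 0.0], [1.0, 1.0]])
    ambs = torch.zeros(6, 2)
    ambs[5] = tensor([1.0, 1.0])
    return loss_fn(crashes, ambs)  # tensor(0.)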
def centroid_to_submission(centroids_dict, date_start='2019-07-01', date_end='2020-01-01', tw_cluster_strategy='baseline', verbose=0):
'''Takes dictionary of clusters and centroids and creates a data frame in the format needed for submission'''
# Create Date range covering submission period set
dates = pd.date_range(date_start, date_end, freq='3h')
submission_df = pd.DataFrame({'date':dates})
submission_df = create_temporal_features(submission_df,'date')
submission_df["cluster"] = submission_df.apply(lambda x: assign_TW_cluster(x.weekday, x.time_window_str, strategy=tw_cluster_strategy) ,axis=1)
ambulance_columns = ['A0_Latitude', 'A0_Longitude', 'A1_Latitude','A1_Longitude', 'A2_Latitude', 'A2_Longitude',
'A3_Latitude', 'A3_Longitude', 'A4_Latitude', 'A4_Longitude', 'A5_Latitude', 'A5_Longitude']
for i in submission_df["cluster"].unique():
submission_df["placements"] = submission_df["cluster"].apply(lambda x: centroids_dict.get(x))
submission_df[ambulance_columns] = pd.DataFrame(submission_df.placements.tolist(), index=submission_df.index)
submission_df = submission_df.drop('placements', axis=1)
submission_df = drop_temporal(submission_df)
submission_df = submission_df.drop(["cluster"], axis=1)
if verbose > 0:
print('submission dataframe created')
return submission_df
def create_submission_csv(submission_df, crash_source, outlier_filter, tw_cluster_strategy, placement_method, path='../Outputs/', verbose=0):
'''Takes dataframe in submission format and outputs a csv file with matching name'''
current_time = dt.datetime.now()
filename = f'{current_time.year}{current_time.month}{current_time.day}_{crash_source}_{outlier_filter}_{tw_cluster_strategy}_{placement_method}.csv'
submission_df.to_csv(path+filename,index=False)
if verbose > 0:
print(f'{filename} saved in {path}')
def score(train_placements_df, crash_df, test_start_date='2018-01-01', test_end_date='2019-12-31', verbose=0):
'''
Can be used to score the ambulance placements against a set of crashes.
Can be used on all crash data, train_df or holdout_df as crash_df.
'''
test_df = crash_df.loc[(crash_df.datetime >= test_start_date) & (crash_df.datetime <= test_end_date)]
if verbose > 0:
print(f'Data points in test period: {test_df.shape[0]}' )
total_distance = 0
for crash_date, c_lat, c_lon in test_df[['datetime', 'latitude', 'longitude']].values:
row = train_placements_df.loc[train_placements_df.date < crash_date].tail(1)
dists = []
for a in range(6):
dist = ((c_lat - row[f'A{a}_Latitude'].values[0])**2+(c_lon - row[f'A{a}_Longitude'].values[0])**2)**0.5
dists.append(dist)
total_distance += min(dists)
return total_distance
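
# Sketch: score a placement plan against held-out crashes. train_placements_df comes from
# centroid_to_submission above; dividing by the number of crashes gives the average distance.
# total = score(train_placements_df, holdout_df,
#               test_start_date='2019-01-01', test_end_date='2019-07-01')
# avg_distance = total / max(holdout_df.shape[0], 1)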
def import_uber_data():
'''Imports the hourly travel times from Uber movement data.
In addition, the hexclusters used by Uber in Nairobi are imported. '''
# Read the JSON file with the hexclusters
file = open('../Inputs/540_hexclusters.json',)
hexclusters = json.load(file)
file.close()
# Find the centroids of the hexbin clusters
cluster_id = []
longitude = []
latitude = []
for i in range(len(hexclusters['features'])):
coords = hexclusters['features'][i]['geometry']['coordinates'][0]
x = [long for long, lat in coords]
y = [lat for long, lat in coords]
x_c = sum(x) / len(x)
y_c = sum(y) / len(y)
cluster_id.append(hexclusters['features'][i]['properties']['MOVEMENT_ID'])
longitude.append(x_c)
latitude.append(y_c)
# Create DataFrame with hexcluster ids and the lat and long values of the centroids
global df_hexclusters
df_hexclusters = pd.DataFrame([cluster_id, longitude, latitude]).transpose()
df_hexclusters.columns = ['cluster_id', 'longitude', 'latitude']
df_hexclusters['cluster_id'] = df_hexclusters['cluster_id'].astype('int')
df_hexclusters = assign_hex_bin(df_hexclusters, 'latitude', 'longitude')
h3res = h3.h3_get_resolution(df_hexclusters.loc[0, 'h3_zone_6'])
# Read the travel times for weekdays
df_tt_hourly_wd = pd.read_csv('../Inputs/nairobi-hexclusters-2018-3-OnlyWeekdays-HourlyAggregate.csv')
# Add lat and long values to the travel time data
global df_combined_wd
df_combined_wd = df_tt_hourly_wd.merge(df_hexclusters, how='left', left_on='sourceid', right_on='cluster_id')
df_combined_wd.drop(['cluster_id'], axis=1, inplace=True)
df_combined_wd = df_combined_wd.merge(df_hexclusters, how='left', left_on='dstid', right_on='cluster_id', suffixes=('_source', '_dst'))
df_combined_wd.drop(['cluster_id'], axis=1, inplace=True)
#df_combined_wd['dist_air'] = df_combined_wd[['latitude_source', 'longitude_source', 'latitude_dst', 'longitude_dst']].apply(lambda x: get_distance_air(x.latitude_source, x.longitude_source, x.latitude_dst, x.longitude_dst, h3res), axis=1)
#df_combined_wd['avg_speed'] = df_combined_wd['dist_air'] / df_combined_wd['mean_travel_time'] * 3600
# Get average speeds per hour
global avg_speeds_wd
#avg_speeds_wd = df_combined_wd.groupby('hod').mean()['avg_speed']
avg_speeds_wd = [32.309, 31.931, 33.079, 33.651, 32.329, 30.146, 25.374, 23.388, 24.028, 24.589, 23.937, 23.609,
23.485, 23.755, 23.506, 22.334, 20.371, 18.948, 20.007, 21.896, 25.091, 28.293, 29.963, 29.516]
# Read the travel times for weekends
df_tt_hourly_we = pd.read_csv('../Inputs/nairobi-hexclusters-2018-3-OnlyWeekends-HourlyAggregate.csv')
# Add lat and long values to the travel time data
global df_combined_we
df_combined_we = df_tt_hourly_we.merge(df_hexclusters, how='left', left_on='sourceid', right_on='cluster_id')
df_combined_we.drop(['cluster_id'], axis=1, inplace=True)
df_combined_we = df_combined_we.merge(df_hexclusters, how='left', left_on='dstid', right_on='cluster_id', suffixes=('_source', '_dst'))
df_combined_we.drop(['cluster_id'], axis=1, inplace=True)
#df_combined_we['dist_air'] = df_combined_we[['latitude_source', 'longitude_source', 'latitude_dst', 'longitude_dst']].apply(lambda x: get_distance_air(x.latitude_source, x.longitude_source, x.latitude_dst, x.longitude_dst, h3res), axis=1)
#df_combined_we['avg_speed'] = df_combined_we['dist_air'] / df_combined_we['mean_travel_time'] * 3600
# Get average speeds per hour
global avg_speeds_we
#avg_speeds_we = df_combined_we.groupby('hod').mean()['avg_speed']
avg_speeds_we = [30.955, 31.295, 31.420, 31.653, 31.033, 30.927, 33.035, 31.679, 28.906, 26.834, 25.684, 24.879,
24.587, 23.887, 23.518, 24.960, 25.638, 26.112, 24.846, 23.837, 26.140, 28.012, 29.391, 29.532]
def get_metrics(coord_src, coord_dst, weekend, hour):
'''
Inputs:
* coord_src: H3 hexbin or coordinate as list or tuple of the origin of the trip
* coord_dst: H3 hexbin or coordinate as list or tuple of the destination of the trip
* weekend: 1 if weekend, 0 if weekday
* hour: Hour of the day as integer
Output: Returns a dict with the five levels of metrics:
* Zindi: Euclidean distance between latitude and longitude values (Zindi challenge)
* Air: Air distance in kilometers
* Road: Road distance in kilometers
* Time: Driving distance in minutes
* Golden: Binary value: False if driving distance below threshold ("Golden Hour"), True if above
'''
if type(coord_src) == str:
lat_src = h3.h3_to_geo(coord_src)[0]
long_src = h3.h3_to_geo(coord_src)[1]
h3res = h3.h3_get_resolution(coord_src)
elif isinstance(coord_src, (list, tuple)):
lat_src = coord_src[0]
long_src = coord_src[1]
h3res = 0
if type(coord_dst) == str:
lat_dst = h3.h3_to_geo(coord_dst)[0]
long_dst = h3.h3_to_geo(coord_dst)[1]
elif isinstance(coord_dst, (list, tuple)):
lat_dst = coord_dst[0]
long_dst = coord_dst[1]
metric = {}
# Zindi score
metric['Zindi'] = get_distance_zindi(lat_src, long_src, lat_dst, long_dst)
# Air distance
distance_air = get_distance_air(lat_src, long_src, lat_dst, long_dst, h3res)
metric['Air'] = distance_air
# Approximated road distance
detour_coef = 1.3 # Known as Henning- or Hanno-coefficient
metric['Road'] = distance_air * detour_coef
# Travel time from Uber movement data
travel_time = get_distance_time(lat_src, long_src, lat_dst, long_dst, weekend, hour, h3res)
metric['Time'] = travel_time
# 'Golden hour'-threshold classification
golden_hour = 60 # Minutes
metric['Golden'] = travel_time > golden_hour
return metric
def get_distance_zindi(lat_src, long_src, lat_dst, long_dst):
'''
Returns the Euclidean distance between latitude and longitude values like in the Zindi-score.
'''
return ((lat_src - lat_dst)**2 + (long_src - long_dst)**2) ** 0.5
def get_distance_air(lat_src, long_src, lat_dst, long_dst, h3res):
'''
Returns the geodesic distance between two pairs of coordinates in km.
If a distance between two points within a single cluster has to be calculated,
the average distance of all possible distances within one cluster is returned.
'''
distance_air = geodesic((lat_src, long_src), (lat_dst, long_dst)).km
if distance_air == 0:
area = h3.hex_area(resolution = h3res)
radius = (area / math.pi) ** 0.5
distance_air = 128 / (45 * math.pi) * radius
return distance_air
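
# Note on the in-cluster fallback above: 128 / (45 * pi) * r is the mean distance between two
# points drawn uniformly from a disc of radius r (about 0.905 * r), with r chosen so the disc
# has the same area as one hex cell. It stands in for the expected trip length when source and
# destination fall in the same cluster.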
def get_distance_time(lat_src, long_src, lat_dst, long_dst, weekend, hour, h3res):
'''
Returns the time that is needed to cover the road distance between two pairs of coordinates in minutes based on the Uber movement data.
'''
hex_src = h3.geo_to_h3(lat=lat_src, lng=long_src, resolution=h3res)
hex_dst = h3.geo_to_h3(lat=lat_dst, lng=long_dst, resolution=h3res)
if weekend == 1:
travel_times = df_combined_we[(df_combined_we['h3_zone_6_source'] == hex_src) & \
(df_combined_we['h3_zone_6_dst'] == hex_dst) & \
(df_combined_we['hod'] == hour) \
]['mean_travel_time']
else:
travel_times = df_combined_wd[(df_combined_wd['h3_zone_6_source'] == hex_src) & \
(df_combined_wd['h3_zone_6_dst'] == hex_dst) & \
(df_combined_wd['hod'] == hour) \
]['mean_travel_time']
if len(travel_times) > 0:
travel_time = sum(travel_times) / len(travel_times) / 60
else:
# len(travel_times) == 0 means that no travel times exist for this connection in the Uber movement data
# Get air distance between two original coordinates
orig_dist = get_distance_air(lat_src, long_src, lat_dst, long_dst, h3res)
# Divide air distance through average speed
if weekend == 1:
travel_time = orig_dist / avg_speeds_we[hour] * 60
else:
travel_time = orig_dist / avg_speeds_wd[hour] * 60
return travel_time
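
# Sketch: metrics between two made-up Nairobi coordinates at 08:00 on a weekday. Requires
# import_uber_data() to have been called first so the global travel-time frames exist.
# m = get_metrics((-1.30, 36.80), (-1.27, 36.85), weekend=0, hour=8)
# m['Time']    # estimated driving time in minutes
# m['Golden']  # True if the trip exceeds the 60-minute threshold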
def score_adv(train_placements_df, crash_df, test_start_date='2018-01-01', test_end_date='2019-12-31', verbose=0):
'''
Advanced version of the standard score function. Returns a dictionary with five entries.
The first entry is the 'Zindi' score, just like in the score function. The other values are 'Air', 'Road', 'Time' and 'Golden'.
Can be used to score the ambulance placements against a set of crashes.
Can be used on all crash data, train_df or holdout_df as crash_df.
'''
try:
df_combined_wd
except NameError:
import_uber_data()
test_df = crash_df.loc[(crash_df.datetime >= test_start_date) & (crash_df.datetime <= test_end_date)]
if verbose > 0:
print(f'Data points in test period: {test_df.shape[0]}' )
total_distance = {'Zindi': 0, 'Air': 0, 'Road': 0, 'Time': 0, 'Golden': 0}
for crash_date, c_lat, c_lon in test_df[['datetime', 'latitude', 'longitude']].values:
row = train_placements_df.loc[train_placements_df.date < crash_date].tail(1)
if crash_date.weekday() in (6, 7):
weekend = 1
else:
weekend = 0
hour = crash_date.hour
dists = []
for a in range(6):
dist = get_metrics((row[f'A{a}_Latitude'].values[0], row[f'A{a}_Longitude'].values[0]), (c_lat, c_lon), weekend, hour)
dists.append(dist)
min_dist = dists[np.argmin([x['Time'] for x in dists])]
for x in total_distance:
total_distance[x] += min_dist[x]
return total_distance
def ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
outlier_filter=0, holdout_strategy='random', holdout_test_size=0.3,
test_period_date_start='2019-01-01', test_period_date_end='2019-07-01',
tw_cluster_strategy='saturday_2', placement_method='k_means', verbose=0,
lr=3e-2, n_epochs=400, batch_size=50):
'''
Load crash data (from train or prediction), apply feature engineering, run time-window clustering (based on strategy choice),
create ambulance placements and write the submission file.
'''
# load crash data into dataframe
crash_df = create_crash_df(train_file = input_path+crash_source_csv+'.csv')
# in case of loading file with hex bins instead of lat/long
if 'latitude' not in crash_df.columns:
crash_df['latitude'] = crash_df.hex_bins.apply(lambda x : h3.h3_to_geo(x)[0])
crash_df['longitude'] = crash_df.hex_bins.apply(lambda x : h3.h3_to_geo(x)[1])
crash_df.drop("hex_bins", axis=1, inplace=True)
# create individual date and time features from date column
crash_df = create_temporal_features(crash_df)
# split data into train and test sets
train_df, test_df = split_accident_df(data=crash_df, strategy=holdout_strategy,
test_size=holdout_test_size)
# remove outliers from the train set based on lat and lon
train_df = outlier_removal(train_df, filter=outlier_filter)
# apply time window cluster labels to df based on strategy specified
train_df = create_cluster_feature(train_df, strategy=tw_cluster_strategy, verbose=verbose)
# Run clustering model to get placement set centroids for each TW cluster
test_df_with_clusters = create_cluster_feature(test_df, strategy=tw_cluster_strategy, verbose=0)
centroids_dict = create_cluster_centroids(train_df, test_df=test_df_with_clusters, verbose=verbose, method=placement_method,
lr=lr, n_epochs=n_epochs, batch_size=batch_size)
# create df in format needed for submission
train_placements_df = centroid_to_submission(centroids_dict, date_start='2018-01-01', date_end='2019-12-31',
tw_cluster_strategy=tw_cluster_strategy)
# Run scoring functions
# If using score
if verbose > 0:
print(f'Total size of test set: {test_df.shape[0]}')
test_score = score(train_placements_df, test_df, test_start_date=test_period_date_start,
test_end_date=test_period_date_end)
if verbose > 0:
print(f'Total size of train set: {crash_df.shape[0]}')
train_score = score(train_placements_df,train_df,
test_start_date=test_period_date_start, test_end_date=test_period_date_end)
if verbose > 0:
print(f'Score on test set: {test_score / max(test_df.shape[0],1)}')
print(f'Score on train set: {train_score / train_df.shape[0] } (avg distance per accident)')
# If using score_adv:
if verbose == 2:
test_score = score_adv(train_placements_df, test_df, test_start_date=test_period_date_start,
test_end_date=test_period_date_end)
train_score = score_adv(train_placements_df,train_df,
test_start_date=test_period_date_start, test_end_date=test_period_date_end)
for x in test_score:
test_score[x] = test_score[x] / max(test_df.shape[0],1)
print(f'Score on test set: {test_score}')
for x in train_score:
train_score[x] = train_score[x] / max(train_df.shape[0],1)
print(f'Score on train set: {train_score} (avg distance per accident)')
# Create file for submitting to zindi
submission_df = centroid_to_submission(centroids_dict, date_start='2019-07-01', date_end='2020-01-01',
tw_cluster_strategy=tw_cluster_strategy)
create_submission_csv(submission_df, crash_source=crash_source_csv, outlier_filter=outlier_filter,
tw_cluster_strategy=tw_cluster_strategy, placement_method=placement_method, path=output_path ,verbose=verbose)
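
# Example invocation (parameter values are illustrative; they mirror the defaults' style):
# ambulance_placement_pipeline(crash_source_csv='Train', outlier_filter=0.01,
#                              holdout_strategy='year_2019', holdout_test_size=0.3,
#                              tw_cluster_strategy='holiday_6', placement_method='k_means',
#                              verbose=1)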
### Prediction functions from here on
def convert_h3_to_lat_lon(df):
"""
Convert hex bins back to latitude and longitude
"""
df['latitude'] = df.hex_bins.apply(lambda x: h3.h3_to_geo(x)[0])
df['longitude'] = df.hex_bins.apply(lambda x: h3.h3_to_geo(x)[1])
df = df.drop("hex_bins", axis=1)
return df
def create_pred_template(df):
'''Based on the hex bin resolution, creates an empty data frame with one row per 3-hour time window per hex bin.
This results in an n * 2 dataframe (columns: time_windows, hex_bins) where the number of rows equals hex_bins * 4369.
4369 is the number of 3-hour windows between the start and end date (days in the period * 8 windows per day, end date inclusive).'''
#Create dataframe to get the accurate amount of 3-hour time windows for the desired time frame
date_start = '2018-01-01'
date_end = '2019-07-01'
dates = pd.date_range(date_start, date_end, freq='3h')
all_days_df = pd.DataFrame(dates, columns=["dates"])
time_windows = list(all_days_df["dates"])
len_windows = all_days_df.shape[0]
list_unique_hexbins = list(df["h3_zone_6"].unique())
list_bins_per_window = []
list_time_windows = []
for i in range(0, len(list_unique_hexbins)):
list_bins_per_window += len_windows * [list_unique_hexbins[i]]
list_time_windows += time_windows
input_df = {"time_windows": list_time_windows, "hex_bins": list_bins_per_window}
df_pred_template = pd.DataFrame(data=input_df)
return df_pred_template
def rta_per_time_window_hex_bin(df):
'''
Add up RTA's per time window and hex bin
'''
df["time_window_key"] = df["datetime"].apply(lambda x: str(x.year) + "-" + str(x.month) + "-" + str(x.day) + "-" + str(math.floor(x.hour / 3)))
df_tw_hex = df.groupby(["time_window_key", "h3_zone_6"]).agg({"uid": "count"}).reset_index()
col_names = ["time_window_key"] + ["hex_bins"] + ["RTA"]
df_tw_hex.columns = col_names
return df_tw_hex
def fill_overall_df(df_pred_template, df_tw_hex):
'''
Join road traffic accidents onto empty data frame that consists of time windows (8 per day) for all days (1.5 years) for all hex bins.
For combinations with no accidents, NaNs will be converted into 0.
'''
df_pred_template["time_window_key"] = df_pred_template["time_windows"].apply(lambda x: str(x.year) + "-" + str(x.month) + "-" + str(x.day) + "-" + str(math.floor(x.hour / 3)))
df_merged = pd.merge(df_pred_template, df_tw_hex, on=["time_window_key", "hex_bins"], how="outer")
df_merged = df_merged.fillna(0)
list_of_c = list(df_merged.columns)
list_of_c[0] = "datetime"
df_merged.columns = list_of_c
return df_merged
def generate_outlier_list(df, frequency_cutoff=1):
"""
Based on the minimum frequency of occurrence, cut off all hex bins that do not exceed that value over 1.5 years. Returns list of hex bins to exclude.
"""
if frequency_cutoff == 0:
return []
else:
df_outliers = df.groupby("hex_bins")
df_outliers = df_outliers.agg({'RTA': np.count_nonzero})
df_outliers = df_outliers.reset_index()
df_outliers.columns = ["hex_bins", "RTA_nonzero"]
df_freq_outliers = df_outliers.loc[df_outliers["RTA_nonzero"] <= frequency_cutoff]
# Get list of frequency outliers
list_freq_outliers = list(df_freq_outliers["hex_bins"].values)
return list_freq_outliers
def filter_df_for_pred_a(df, list_freq_outliers):
"""
Exclude frequency outliers according to list from the crash dataframe and keep only the datetime and hex bin columns
"""
df_pred_a = df.loc[~df["h3_zone_6"].isin(list_freq_outliers)]
df_pred_a = df_pred_a.reset_index()
df_pred_a = df_pred_a.drop(["uid", "latitude", "longitude", "time", "time_window", "time_window_str", "day", "weekday", "month", "half_year", "rainy_season",
"year", "date_trunc", "holiday", "time_window_key", "index"], axis=1)
df_pred_a.columns = ["datetime", "hex_bins"]
return df_pred_a
def filter_df_for_pred_b(df_merged, list_freq_outliers):
"""
Exclude frequency outliers according to list and drop all hex bin / time window combinations with 0 RTA's
"""
# Filters overall dataframe to exclude hex bins with only one RTA occurrence in the whole timeframe (according to input list)
df_merged = df_merged.loc[~df_merged["hex_bins"].isin(list_freq_outliers)]
# Also filters out all hex bin and time window combinations where no RTA occurred
df_pred_b = df_merged.loc[df_merged["RTA"] > 0]
return df_pred_b
def clean_pred_b(df_pred_b):
"""Dropping all redundant rows, fixing indices and making sure the time windows are hit."""
# Remove some redundant rows and fix indices
df_predictions = df_pred_b.drop(["time_window_key", "RTA"], axis=1)
df_predictions = df_predictions.reset_index()
df_predictions.drop("index", axis=1, inplace=True)
# Add 1 minute to have the RTA's lie inside the time window rather than on the verge, sort values and reset the index
df_predictions["datetime"] = df_predictions["datetime"].apply(lambda x: x + pd.Timedelta(minutes=1))
df_predictions = df_predictions.sort_values(by="datetime").reset_index()
# Drop redundant columns
df_predictions = df_predictions.drop("index", axis=1)
return df_predictions
def create_samples(df, list_freq_outliers):
"""
Creates a frequency table (hex bin x weekday x time window) from which combinations can be drawn, subject to the predicted RTA's per day
"""
dict_windows = {1: "00-03", 2: "03-06", 3: "06-09", 4: "09-12", 5: "12-15",
6: "15-18", 7: "18-21", 8: "21-24"}
df["time_window"] = df["datetime"].apply(lambda x: math.floor(x.hour / 3) + 1)
df["time_window_str"] = df["time_window"].apply(lambda x: dict_windows.get(x))
df["weekday"] = df["datetime"].apply(lambda x: x.weekday())
# Filtering for hex bins with only one occurrence
df_filter = df.loc[~df["hex_bins"].isin(list_freq_outliers)]
# Prepare data frame for sample generation
df_freq = df_filter.groupby(["hex_bins", "weekday", "time_window_str"])
df_samples = df_freq.agg({'RTA': [np.count_nonzero]})
df_samples = df_samples.reset_index()
df_samples.columns = ["hex_bins", "weekday", "time_window", "RTA_freq"]
return df_samples
def generate_predictions(df, predicted_rta):
"""
Takes a dataframe containing the RTA frequency per weekday and time window and the predicted RTA's per day and turns this into a prediction dataframe.
"""
df_monday = df.loc[df["weekday"] == 0].sort_values(by="RTA_freq", ascending=False)
df_tuesday = df.loc[df["weekday"] == 1].sort_values(by="RTA_freq", ascending=False)
df_wednesday = df.loc[df["weekday"] == 2].sort_values(by="RTA_freq", ascending=False)
df_thursday = df.loc[df["weekday"] == 3].sort_values(by="RTA_freq", ascending=False)
df_friday = df.loc[df["weekday"] == 4].sort_values(by="RTA_freq", ascending=False)
df_saturday = df.loc[df["weekday"] == 5].sort_values(by="RTA_freq", ascending=False)
df_sunday = df.loc[df["weekday"] == 6].sort_values(by="RTA_freq", ascending=False)
# Split overall predictions into predictions per weekday
lst_mon = predicted_rta[0::7]
lst_tue = predicted_rta[1::7]
lst_wed = predicted_rta[2::7]
lst_thu = predicted_rta[3::7]
lst_fri = predicted_rta[4::7]
lst_sat = predicted_rta[5::7]
lst_sun = predicted_rta[6::7]
# The evaluation period 2019-07-01 to 2019-12-31 conveniently starts with a Monday but ends with a Tuesday, hence the loop has to run
# one iteration more for Monday and Tuesday.
# This generates a list of lists of predictions for each weekday
# Use separate list objects per weekday; chained assignment to [] would alias them all to one list
monday_bins, tuesday_bins, wednesday_bins, thursday_bins, friday_bins, saturday_bins, sunday_bins = ([] for _ in range(7))
monday_tw, tuesday_tw, wednesday_tw, thursday_tw, friday_tw, saturday_tw, sunday_tw = ([] for _ in range(7))
for i in range(len(lst_mon)):
monday_bins.append(list(*[df_monday["hex_bins"][0:lst_mon[i]]]))
monday_tw.append(list(*[df_monday["time_window"][0:lst_mon[i]]]))
tuesday_bins.append(list(*[df_tuesday["hex_bins"][0:lst_tue[i]]]))
tuesday_tw.append(list(*[df_tuesday["time_window"][0:lst_tue[i]]]))
for i in range(len(lst_wed)):
wednesday_bins.append(list(*[df_wednesday["hex_bins"][0:lst_wed[i]]]))
wednesday_tw.append(list(*[df_wednesday["time_window"][0:lst_wed[i]]]))
thursday_bins.append(list(*[df_thursday["hex_bins"][0:lst_thu[i]]]))
thursday_tw.append(list(*[df_thursday["time_window"][0:lst_thu[i]]]))
friday_bins.append(list(*[df_friday["hex_bins"][0:lst_fri[i]]]))
friday_tw.append(list(*[df_friday["time_window"][0:lst_fri[i]]]))
saturday_bins.append(list(*[df_saturday["hex_bins"][0:lst_sat[i]]]))
saturday_tw.append(list(*[df_saturday["time_window"][0:lst_sat[i]]]))
sunday_bins.append(list(*[df_sunday["hex_bins"][0:lst_sun[i]]]))
sunday_tw.append(list(*[df_sunday["time_window"][0:lst_sun[i]]]))
# Turn list of lists into an overall list for each weekday's predictions
flat_monday_bins = [item for sublist in monday_bins for item in sublist]
flat_monday_tw = [item for sublist in monday_tw for item in sublist]
flat_tuesday_bins = [item for sublist in tuesday_bins for item in sublist]
flat_tuesday_tw = [item for sublist in tuesday_tw for item in sublist]
flat_wednesday_bins = [item for sublist in wednesday_bins for item in sublist]
flat_wednesday_tw = [item for sublist in wednesday_tw for item in sublist]
flat_thursday_bins = [item for sublist in thursday_bins for item in sublist]
flat_thursday_tw = [item for sublist in thursday_tw for item in sublist]
flat_friday_bins = [item for sublist in friday_bins for item in sublist]
flat_friday_tw = [item for sublist in friday_tw for item in sublist]
flat_saturday_bins = [item for sublist in saturday_bins for item in sublist]
flat_saturday_tw = [item for sublist in saturday_tw for item in sublist]
flat_sunday_bins = [item for sublist in sunday_bins for item in sublist]
flat_sunday_tw = [item for sublist in sunday_tw for item in sublist]
# Generate list with hex bins and time windows as input for prediction
flat_bins = flat_monday_bins + flat_tuesday_bins + flat_wednesday_bins + flat_thursday_bins + flat_friday_bins + flat_saturday_bins + flat_sunday_bins
flat_tw = flat_monday_tw + flat_tuesday_tw + flat_wednesday_tw + flat_thursday_tw + flat_friday_tw + flat_saturday_tw + flat_sunday_tw
# Generate list with day of the week entries for each prediction as input for dataframe
weekdays = [0] * sum(lst_mon) + [1] * sum(lst_tue) + [2] * sum(lst_wed) + [3] * sum(lst_thu) + [4] * sum(lst_fri) + [5] * sum(lst_sat) + [6] * sum(lst_sun)
# Generate list with week entries for each prediction as input for dataframe
list_of_days_list = [lst_mon, lst_tue, lst_wed, lst_thu, lst_fri, lst_sat, lst_sun]
lst_weeks = []
for lst_days in list_of_days_list:
i = 0
for number in lst_days:
lst_weeks += [i] * number
i += 1
# Create dataframe
df_pred_c = pd.DataFrame(list(zip(flat_bins, flat_tw, weekdays, lst_weeks)), columns=["hex_bins", "time_window", "weekday", "week"])
return df_pred_c
def generate_predictions_first_half_2019(df, predicted_rta):
"""
Takes a dataframe containing the RTA frequency per weekday and time window and the predicted RTA's per day and turns this into a prediction dataframe.
"""
df_monday = df.loc[df["weekday"] == 0].sort_values(by="RTA_freq", ascending=False)
df_tuesday = df.loc[df["weekday"] == 1].sort_values(by="RTA_freq", ascending=False)
df_wednesday = df.loc[df["weekday"] == 2].sort_values(by="RTA_freq", ascending=False)
df_thursday = df.loc[df["weekday"] == 3].sort_values(by="RTA_freq", ascending=False)
df_friday = df.loc[df["weekday"] == 4].sort_values(by="RTA_freq", ascending=False)
df_saturday = df.loc[df["weekday"] == 5].sort_values(by="RTA_freq", ascending=False)
df_sunday = df.loc[df["weekday"] == 6].sort_values(by="RTA_freq", ascending=False)
# Split overall predictions into predictions per weekday
lst_mon = predicted_rta[6::7]
lst_tue = predicted_rta[0::7]
lst_wed = predicted_rta[1::7]
lst_thu = predicted_rta[2::7]
lst_fri = predicted_rta[3::7]
lst_sat = predicted_rta[4::7]
lst_sun = predicted_rta[5::7]
# The first half of 2019 starts with a Tuesday and ends with a Sunday, hence the shifted weekday
# offsets above and the separate loop length for Monday versus the other weekdays.
# This generates a list of lists of predictions for each weekday
# Use separate list objects per weekday; chained assignment to [] would alias them all to one list
monday_bins, tuesday_bins, wednesday_bins, thursday_bins, friday_bins, saturday_bins, sunday_bins = ([] for _ in range(7))
monday_tw, tuesday_tw, wednesday_tw, thursday_tw, friday_tw, saturday_tw, sunday_tw = ([] for _ in range(7))
for i in range(len(lst_mon)):
monday_bins.append(list(*[df_monday["hex_bins"][0:lst_mon[i]]]))
monday_tw.append(list(*[df_monday["time_window"][0:lst_mon[i]]]))
for i in range(len(lst_wed)):
tuesday_bins.append(list(*[df_tuesday["hex_bins"][0:lst_tue[i]]]))
tuesday_tw.append(list(*[df_tuesday["time_window"][0:lst_tue[i]]]))
wednesday_bins.append(list(*[df_wednesday["hex_bins"][0:lst_wed[i]]]))
wednesday_tw.append(list(*[df_wednesday["time_window"][0:lst_wed[i]]]))
thursday_bins.append(list(*[df_thursday["hex_bins"][0:lst_thu[i]]]))
thursday_tw.append(list(*[df_thursday["time_window"][0:lst_thu[i]]]))
friday_bins.append(list(*[df_friday["hex_bins"][0:lst_fri[i]]]))
friday_tw.append(list(*[df_friday["time_window"][0:lst_fri[i]]]))
saturday_bins.append(list(*[df_saturday["hex_bins"][0:lst_sat[i]]]))
saturday_tw.append(list(*[df_saturday["time_window"][0:lst_sat[i]]]))
sunday_bins.append(list(*[df_sunday["hex_bins"][0:lst_sun[i]]]))
sunday_tw.append(list(*[df_sunday["time_window"][0:lst_sun[i]]]))
# Turn list of lists into an overall list for each weekday's predictions
flat_monday_bins = [item for sublist in monday_bins for item in sublist]
flat_monday_tw = [item for sublist in monday_tw for item in sublist]
flat_tuesday_bins = [item for sublist in tuesday_bins for item in sublist]
flat_tuesday_tw = [item for sublist in tuesday_tw for item in sublist]
flat_wednesday_bins = [item for sublist in wednesday_bins for item in sublist]
flat_wednesday_tw = [item for sublist in wednesday_tw for item in sublist]
flat_thursday_bins = [item for sublist in thursday_bins for item in sublist]
flat_thursday_tw = [item for sublist in thursday_tw for item in sublist]
flat_friday_bins = [item for sublist in friday_bins for item in sublist]
flat_friday_tw = [item for sublist in friday_tw for item in sublist]
flat_saturday_bins = [item for sublist in saturday_bins for item in sublist]
flat_saturday_tw = [item for sublist in saturday_tw for item in sublist]
flat_sunday_bins = [item for sublist in sunday_bins for item in sublist]
flat_sunday_tw = [item for sublist in sunday_tw for item in sublist]
# Generate list with hex bins and time windows as input for prediction
flat_bins = flat_monday_bins + flat_tuesday_bins + flat_wednesday_bins + flat_thursday_bins + flat_friday_bins + flat_saturday_bins + flat_sunday_bins
flat_tw = flat_monday_tw + flat_tuesday_tw + flat_wednesday_tw + flat_thursday_tw + flat_friday_tw + flat_saturday_tw + flat_sunday_tw
# Generate list with day of the week entries for each prediction as input for dataframe
weekdays = [0] * sum(lst_mon) + [1] * sum(lst_tue) + [2] * sum(lst_wed) + [3] * sum(lst_thu) + [4] * sum(lst_fri) + [5] * sum(lst_sat) + [6] * sum(lst_sun)
# Generate list with week entries for each prediction as input for dataframe
list_of_days_list = [lst_mon, lst_tue, lst_wed, lst_thu, lst_fri, lst_sat, lst_sun]
lst_weeks = []
for lst_days in list_of_days_list:
i = 0
for number in lst_days:
lst_weeks += [i] * number
i += 1
# Create dataframe
df_pred_c = pd.DataFrame(list(zip(flat_bins, flat_tw, weekdays, lst_weeks)), columns=["hex_bins", "time_window", "weekday", "week"])
return df_pred_c
def reduce_to_time_windows(df, predict_period):
"""
Takes a data frame of predicted RTA's and brings it into the correct format for clustering.
"""
# Set start of prediction period
if predict_period == '2019_h2':
start = pd.to_datetime("2019-07-01")
if predict_period == '2019_h1':
start = pd.to_datetime("2019-01-01")
# Creates a datetime column that counts the days upwards and then sets all entries to the starting day, 2019-07-01, plus that day
df["help"] = (df["week"]) * 7 + df["weekday"]
df["datetime"] = df["help"].apply(lambda x: start + pd.Timedelta(days=x))
# Convert time windows strings back to datetime objects and add 1 minute to have them lie inside the time window rather than on the verge
df.loc[df["time_window"] == "00-03", "datetime"] = df["datetime"] + pd.Timedelta(minutes=1)
df.loc[df["time_window"] == "03-06", "datetime"] = df["datetime"] + pd.Timedelta(hours=3, minutes=1)
df.loc[df["time_window"] == "06-09", "datetime"] = df["datetime"] + pd.Timedelta(hours=6, minutes=1)
# Released under MIT License
# Copyright (c) 2021 <NAME>, github.com/ispanos
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
# NOTE: Most times Demand is mentioned, it's the inverse demand
import numpy as np
import pandas as pd
from typing import List, Tuple # , Annotated
import copy
import statsmodels.api as sm
# import matplotlib.pyplot as plt
def infinite_sequence():
num = 1
while True:
yield str(num)
num += 1
name_sequence = infinite_sequence()
def next_name():
return next(name_sequence)
def reset_names():
global name_sequence
name_sequence = infinite_sequence()
class Company:
def __init__(self, i: float, s: float, name: str = None):
self._i = float(i)
self._s = float(s)
self._name = name
self._production = None
self._regression_data = None
if not name:
self._name = next_name()
@property
def i(self):
"""
The intercept of the estimated marginal cost curve
Type: float
"""
return self._i
@property
def s(self):
"""
The slope of the estimated marginal cost curve
Type: float
"""
return self._s
@property
def equation(self) -> str:
"""
The estimated marginal cost equation as a human-readable string (rounded to 2 decimals)
Type: str
"""
return f"Mc = {str(round(self._i, 2))} + {str(round(self._s, 2))} * q"
@property
def full_equation(self) -> str:
"""
The estimated marginal cost equation as a string with full precision
Type: str
"""
return f"Mc = {str(self._i)} + {str(self._s)} * q"
@property
def name(self):
"""
A name to identify the company
Type: string
"""
return self._name
@property
def production(self):
"""
The quantity produced by the company in the current market
Type: float
"""
return self._production
@property
def regression_data(self):
"""
Regression data from OLS regression.
"""
return self._regression_data
def set_prod(self, production: float):
"""
Set the production of the company in the current market
"""
self._production = production
return self
def set_name(self, name: str):
"""
Set a name to identify the company by
"""
self._name = name
return self
def profits(self, p: float):
"""
The profits of the company
Type: float
"""
return (p * self._production) - (self._i +
self._s * self._production
) * self._production
def set_regression_data(self, stuff):
self._regression_data = stuff
return self
Demand = Tuple[float, float]
CompanyList = List[Company]
def calculate_price(total_q: float, demand: Demand) -> float:
"""
Calculates the equilibrium price, given a linear Demand curve and
the total units produced by the companies
Args:
total_q: The total units produced
demand: The parameters of a linear demand curve
Returns:
The equilibrium price
"""
return demand[0] - demand[1] * total_q
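
# Worked example: with inverse demand P = 100 - 2 * Q, a total output of 10 units clears the
# market at P = 100 - 2 * 10 = 80.
# calculate_price(total_q=10, demand=(100, 2))  # -> 80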
def set_cournot_production(demand: Demand,
companies: CompanyList) -> CompanyList:
"""
Return a list with the addition of the production units in
every company in the given list, when all companies are in a
Cournot competition.
Args:
demand: Market's demand
companies: A list of companies
Returns: Company_List with updated production values
"""
# Create an array of length N -> (M_i + 2 * B)
diagonal: List[float] = [x.s + 2 * demand[1] for x in companies]
dimension = len(companies)
# Create a matrix of N x N dimension filled with B
x = np.full((dimension, dimension), demand[1], dtype=float)
# Replace the diagonal of the matrix with (M_i + 2 * B)
# This creates matrix named H in the documentation above
# noinspection PyTypeChecker
np.fill_diagonal(x, diagonal)
# Create a matrix N x 1 with ( A - K ) -- Named U in the documentation above
constants = [demand[0] - comp.i for comp in companies]
# Our solution is an array of quantities, length N.
productions = np.linalg.solve(x, constants).flatten()
for i, c in enumerate(companies):
c.set_prod(productions[i])
return companies
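
# Hedged sketch: three symmetric firms with MC = 10 + q facing P = 100 - Q. Solving the
# linear system above gives q_i = 18 for each firm (so Q = 54 and P = 46).
def _example_symmetric_cournot():
    demand = (100.0, 1.0)
    firms = [Company(10, 1) for _ in range(3)]
    firms = set_cournot_production(demand, firms)
    return [round(f.production, 2) for f in firms]  # [18.0, 18.0, 18.0]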
def merge_companies(comp_i: Company, comp_j: Company) -> Company:
"""
Merges two companies by horizontally adding their production output curves
in relation with their marginal costs.
Args:
comp_i: Company that will merge with another one
comp_j: Company that will merge with another one
Returns:
Company post merge
"""
if (comp_i.s + comp_j.s) == 0:
new_comp = Company(min(comp_i.i, comp_j.i), 0)
elif comp_i.s == 0 or comp_j.s == 0:
print("Edge case is not accounted for.")
exit()
else:
new_comp = Company((comp_j.s * comp_i.i + comp_i.s * comp_j.i) /
(comp_i.s + comp_j.s),
comp_i.s * comp_j.s / (comp_i.s + comp_j.s),
comp_i.name + '&' + comp_j.name)
return new_comp
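
# Worked example: merging two identical firms with MC = 10 + 2q yields MC = 10 + q, i.e. the
# horizontal sum of the marginal cost curves halves the slope.
# merged = merge_companies(Company(10, 2, "a"), Company(10, 2, "b"))
# merged.equation  # 'Mc = 10.0 + 1.0 * q', with name 'a&b'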
def merge_two(demand: Demand, companies: CompanyList, to_merge: Tuple[int, int]):
"""
Replace the two companies that merge, in the given list, with the newly formed one.
Args:
demand: Market's demand
companies: An ordered list of the companies that are competing
to_merge: A tuple composed of the two indexes of the two companies that
will merge.
Returns:
Company_List after given merger
"""
companies_post_merge = copy.copy(companies)
comp_i = companies_post_merge[to_merge[0]]
comp_j = companies_post_merge[to_merge[1]]
new_company = merge_companies(comp_i, comp_j)
companies_post_merge.remove(comp_i)
companies_post_merge.remove(comp_j)
companies_post_merge.insert(0, new_company)
return set_cournot_production(demand, companies_post_merge)
def market_stats_dump(companies: CompanyList, q: float, p: float):
"""
Print data for the market.
"""
for comp in companies:
print(f"Company {comp.name} with {comp.equation}\n"
f"\tProduces {round(comp.production, 2)} units",
f" with €{round(comp.profits(p), 2)} profit.\n")
print(f"Total production is {round(q, 2)} units @ €{round(p, 2)}.")
def hhi(c: CompanyList) -> int:
"""
Herfindahl-Hirschman Index
Args:
c: List of companies
Returns: Herfindahl-Hirschman Index
"""
q_tot = sum([x.production for x in c])
return int(round(sum([(100 * x.production / q_tot) ** 2 for x in c])))
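
# Worked example of hhi(): four firms with equal production have market shares of 25% each,
# so the index is 4 * 25**2 = 2500.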
def regress_info(array_x, array_y):
"""
Runs OLS for x, y arrays.
Args:
array_x: Independent variable
array_y: Dependent variable
Returns: sm.OLS.fit
"""
array_x = sm.add_constant(array_x)
model = sm.OLS(array_y, array_x)
return model.fit()
def create_est_company(model: sm.OLS.fit) -> Company:
"""
Given the OLS.fit date, create a new Company
Args:
model: OLS regression data of Q and MC arrays
Returns: Company
"""
new_company = Company(model.params[0], model.params[1])
new_company.set_regression_data(model)
return new_company
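
# Sketch: recover a marginal cost curve from (q, MC) observations and wrap it in a Company.
# The data here are noiseless and made up, so the fit returns the true parameters.
def _example_estimated_company():
    q = np.arange(1.0, 11.0)
    mc = 5.0 + 2.0 * q
    fit = regress_info(q, mc)
    return create_est_company(fit).full_equation  # roughly 'Mc = 5.0 + 2.0 * q'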
def estimate_comp_productions(
demand: Demand,
marginal_costs: Tuple[pd.Series, pd.Series, pd.Series]
) -> Tuple[pd.Series, pd.Series, pd.Series]:
"""
WORK IN PROGRESS -- NOT OPTIMIZED
Returns the production for each company, calculated by their
marginal costs and demand curve. Only works for 3 companies
"""
c1, c2, c3 = marginal_costs
q1, q2, q3 = [], [], []
for i in range(len(c1)):
x = np.full((3, 3), .5, dtype=float)
# Replace the diagonal of the matrix with (M_i + 2 * B)
# This creates matrix named H in the documentation above
# noinspection PyTypeChecker
np.fill_diagonal(x, np.ones((3,), dtype=float))
# Create a matrix MC_i / 2*B
constants = [(demand[0] - c1[i]) / demand[1],
(demand[0] - c2[i]) / demand[1],
(demand[0] - c3[i]) / demand[1]]
# Our solution is an array of quantities, length N.
productions = np.linalg.solve(x, constants).tolist()
q1.append(productions[0])
q2.append(productions[1])
q3.append(productions[2])
pd_q1 = pd.Series(q1, name='q1')
import requests
import pandas as pd
from datetime import datetime
import psycopg2
import time
from sklearn.ensemble import RandomForestRegressor
conn = psycopg2.connect(database="postgres", user="postgres",
password="password", host="127.0.0.1", port="5432")
ARIMA_URL = "http://localhost:9000/predict/ARIMA"
VARMAX_URL = "http://localhost:9000/predict/VARMAX"
SES_URL = "http://localhost:9000/predict/SES"
ACTUAL_URL = "http://localhost:9000/btc/price"
while(1):
actual_res = requests.get(url=ACTUAL_URL)
actual_data = actual_res.json()
ARIMA_res = requests.get(url=ARIMA_URL)
ARIMA_data = ARIMA_res.json()[:-2]
VARMAX_res = requests.get(url=VARMAX_URL)
VARMAX_data = VARMAX_res.json()[:-2]
SES_res = requests.get(url=SES_URL)
SES_data = SES_res.json()[:-2]
actual = pd.DataFrame.from_dict(actual_data)
actual.columns = ['datetime', 'PRICE']
arima = pd.DataFrame.from_dict(ARIMA_data)
arima.columns = ['datetime', 'ARIMA']
varmax = pd.DataFrame.from_dict(VARMAX_data)
varmax.columns = ['datetime', 'VARMAX']
ses = | pd.DataFrame.from_dict(SES_data) | pandas.DataFrame.from_dict |
import sys
import csv
import numpy as np
import gpflow
import os
import pandas as pd
import h5py
from sklearn.model_selection import train_test_split
import tensorflow as tf
from scipy.cluster.vq import kmeans
tf.set_random_seed(1234)
import pickle
import argparse
#Model to train the individual Policy and Value Function models for Penalty Shot
def train_model(**kwargs):
subID = kwargs['subID']
npseed = kwargs['npseed']
iters = kwargs['iterations']
gpu = kwargs['gpu']
numInducingPoints = kwargs['IP']
whichModel = kwargs['whichModel']
print("subID: " + str(subID))
print("npseed: " + str(npseed))
print("iterations: " + str(iters))
print("gpu: " + str(gpu))
print("IPs: " + str(numInducingPoints))
print("Model Requested: " + str(whichModel))
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpu)
print("Loading Data for Subject " + str(subID) + "....")
data = h5py.File('penaltykickdata.h5','r')
subID1HE = np.array(data.get('subID')).astype('float32')
otherdata = np.array(data.get('otherfeatures')).astype('float32')
switchBool = np.array(data.get('targets')).astype('float32') #did they switch at time t+1
trialidx = np.array(data.get('trialidx')).astype('float32')
time = np.array(data.get('time')).astype('int32')
if whichModel == 'PSwitch':
targets = np.array(data.get('targets')).astype('float32')
elif whichModel == 'ExtraEV':
        targets = np.array(data.get('EVtargets')).astype('int32')
otherdata = np.hstack((otherdata, switchBool))
Xfeatures_totaldata = np.hstack((otherdata, subID1HE))
Xfeatures_totaldata = pd.DataFrame(Xfeatures_totaldata)
offset = otherdata.shape[1]
subdata = Xfeatures_totaldata[Xfeatures_totaldata[offset+subID]==1]
subtargets = | pd.DataFrame(targets) | pandas.DataFrame |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from dash.dependencies import Output, Input
from plotly.subplots import make_subplots
external_stylesheets = [dbc.themes.BOOTSTRAP]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
#read the data
pd.set_option("max_columns", None)
airports = | pd.read_json("data/processed_airports.json") | pandas.read_json |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.preprocessing as preprocessing
from sklearn import linear_model
from sklearn import model_selection
from sklearn.ensemble import RandomForestRegressor
print(os.getcwd())
# data_path = r'C:\Users\ArseneLupin\Desktop\OrderType.csv'
data_path = os.getcwd() + r'\dataset\train.csv'
data_train = pd.read_csv(data_path)
data_train.shape
# Using len and df.loc indexing
for i in range(len(data_train)):
for j in range(12):
cur_data = data_train.loc[i][j]
print(cur_data)
# Using .iteritems()
for i, series in data_train.iteritems():
# print(i, ":", type(series))
# print(i + ' : ' + series)
print(series)
print(data_train.head())
# data.select_dtypes()
data_train.info()
# show the data
fig = plt.figure()
plt.subplot2grid((2, 3), (0, 0))
# Survived is the target variable (y)
data_train.Survived.value_counts().plot(kind='bar')  # bar chart
plt.title(u'live num')
plt.ylabel(u'num')
plt.subplot2grid((2, 3), (0, 1))
data_train.Pclass.value_counts().plot(kind="bar")
plt.ylabel(u"num")
plt.title(u"passenger class")
plt.subplot2grid((2, 3), (0, 2))
plt.scatter(data_train.Age, data_train.Survived)
plt.ylabel(u"live") # 设定纵坐标名称
plt.grid(b=True, which='major', axis='y')
plt.title(u"live by age")
plt.subplot2grid((2, 3), (1, 0), colspan=2)
data_train.Age[data_train.Pclass == 1].plot(kind='kde')
data_train.Age[data_train.Pclass == 2].plot(kind='kde')
data_train.Age[data_train.Pclass == 3].plot(kind='kde')
plt.xlabel(u"age") # plots an axis lable
plt.ylabel(u"density")
plt.title(u"passerger class by age ")
plt.legend((u'1 class ', u'2 class', u'3 class'), loc='best') # sets our legend for our graph.
plt.subplot2grid((2, 3), (1, 2))
data_train.Embarked.value_counts().plot(kind='bar')
plt.title(u"num at embarked ")
plt.ylabel(u"num")
# Survival counts by passenger class
fig = plt.figure()
# fig.set(alpha=0.2)  # set the chart's alpha (transparency) parameter
Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()
df = pd.DataFrame({u'live': Survived_1, u'unlive': Survived_0})
df.plot(kind='bar', stacked=True)
plt.title(u"live by class")
plt.xlabel(u"passenger class")
plt.ylabel(u"num")
plt.show()
# Babies nearly all survived, and older passengers survived at a lower rate than younger ones
Survived_age = data_train.Age[data_train.Survived == 1].value_counts()
unSurvived_age = data_train.Age[data_train.Survived == 0].value_counts()
temp_data = {u'live': Survived_age, u'unlive': unSurvived_age}
df = pd.DataFrame(temp_data)
df.plot(kind='bar', stacked=True)
plt.title(u'live by age')
plt.ylabel(u'num')
plt.xlabel(u'age')
print(df)
plt.show()
print(df.head())
print(df.size)
print(df.shape)
df.describe()
df.get_dtype_counts()
df.idxmax()
df.idxmin()
df.info()
data_list = df.iteritems
# Survival counts by sex
fig = plt.figure()
# fig.set(alpha=0.2)  # set the chart's alpha (transparency) parameter
# Most passengers died, and women make up the larger share of the survivors
Survived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()
Survived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()
df = pd.DataFrame({u'man': Survived_m, u'female': Survived_f})
df.plot(kind='bar', stacked=True)
plt.title(u"survied by sex")
plt.xlabel(u"sex")
plt.ylabel(u"num")
plt.show()
# Now look at survival by sex within each cabin class
fig = plt.figure()
# fig.set(alpha=0.65)  # set figure transparency; not important
plt.title(u"surviced by class and sex")
ax1 = fig.add_subplot(141)
data_train.Survived[data_train.Sex == 'female'][data_train.Pclass != 3].value_counts().plot(kind='bar',
label="female highclass",
color='#FA2479')
ax1.set_xticklabels([u"unlive", u"live"], rotation=0)
ax1.legend([u"femall/high class"], loc='best')
ax2 = fig.add_subplot(142, sharey=ax1)
data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts().plot(kind='bar',
label='female, low class',
color='pink')
ax2.set_xticklabels([u"live", u"unlive"], rotation=0)
plt.legend([u"female/low class"], loc='best')
ax3 = fig.add_subplot(143, sharey=ax1)
data_train.Survived[data_train.Sex == 'male'][data_train.Pclass != 3].value_counts().plot(kind='bar',
label='male, high class',
color='lightblue')
ax3.set_xticklabels([u"unlive", u"live"], rotation=0)
plt.legend([u"man/high class"], loc='best')
ax4 = fig.add_subplot(144, sharey=ax1)
data_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 3].value_counts().plot(kind='bar',
label='male low class',
color='steelblue')
ax4.set_xticklabels([u"unlive", u"live"], rotation=0)
plt.legend([u"man/low class"], loc='best')
plt.show()
fig = plt.figure()
fig.set(alpha=0.2)  # set the chart's alpha (transparency) parameter
# x is Embarked and y is the count; in the dataframe the rows are the Embarked values and the columns hold the counts
Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
df = pd.DataFrame({u'live': Survived_1, u'unlive': Survived_0})
df.plot(kind='bar', stacked=True)
plt.title(u"live by Embarked")
plt.xlabel(u"Embarked")
plt.ylabel(u"num")
plt.show()
df
# SibSp is the number of siblings/spouses aboard. This is feature engineering: judging how each attribute bears on the outcome is a detective's intuition, and that is what drew me to this work in the first place, long ago.
g = data_train.groupby(['SibSp', 'Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
g = data_train.groupby(['Parch', 'Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
temp_data = data_train.Parch
temp_data
data_train.head()
# Ticket is the ticket number and should be unique; it has little bearing on the outcome, so leave it out of the feature set for now
# Cabin only has values for 204 passengers; look at its distribution first
temp_data = data_train.Cabin.value_counts()
temp_data
fig = plt.figure()
fig.set(alpha=0.2)  # set the chart's alpha (transparency) parameter
# Cabin (passenger cabin)
Survived_cabin = data_train.Survived[pd.notnull(data_train.Cabin)].value_counts()
Survived_nocabin = data_train.Survived[pd.isnull(data_train.Cabin)].value_counts()
df = pd.DataFrame({u'yes cabin': Survived_cabin, u'no cabin': Survived_nocabin}).transpose()
df.plot(kind='bar', stacked=True)
plt.title(u"live by cabin")
plt.xlabel(u"Cabin exit")
plt.ylabel(u"num")
plt.show()
### Use RandomForestRegressor to fill in the missing Age attribute
def set_missing_ages(df):
    # Pull the existing numeric features to feed into the Random Forest Regressor
age_df = df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
    # Split the passengers into those with known and unknown ages
    known_age = age_df[age_df.Age.notnull()].values
    unknown_age = age_df[age_df.Age.isnull()].values
    # y is the target: age
y = known_age[:, 0]
    # X is the feature matrix
X = known_age[:, 1:]
    # Fit a RandomForestRegressor
rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr.fit(X, y)
    # Predict the unknown ages with the fitted model
predictedAges = rfr.predict(unknown_age[:, 1::])
    # Fill the original missing values with the predictions
df.loc[(df.Age.isnull()), 'Age'] = predictedAges
return df, rfr
def set_Cabin_type(df):
df.loc[(df.Cabin.notnull()), 'Cabin'] = "Yes"
df.loc[(df.Cabin.isnull()), 'Cabin'] = "No"
return df
data_train, rfr = set_missing_ages(data_train)
data_train = set_Cabin_type(data_train)
age_df = data_train[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
known_age = age_df[age_df.Age.notnull()].values
unknown_age = age_df[age_df.Age.isnull()].values
# y is the target: age
y = known_age[:, 0]
X = known_age[:, 1:]
# Fit a RandomForestRegressor
rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr.fit(X, y)
predictedAges = rfr.predict(unknown_age[:, 1::])
# Fill the original missing values with the predictions
data_train.loc[(data_train.Age.isnull()), 'Age'] = predictedAges
dummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix='Cabin')
dummies_Embarked = pd.get_dummies(data_train['Embarked'], prefix='Embarked')
dummies_Sex = pd.get_dummies(data_train['Sex'], prefix='Sex')
dummies_Pclass = | pd.get_dummies(data_train['Pclass'], prefix='Pclass') | pandas.get_dummies |
import pandas as pd
# Create the dataframes
df1 = pd.DataFrame({'a': ['a0','a1','a2','a3'],
'b':['b0','b1','b2','b3'],
'c':['c0','c1','c2','c3'] },
index= [0,1,2,3])
df2 = pd.DataFrame( {'a':['a2','a3','a4','a5'],
'b':['b2','b3','b4','b5'],
'c':['c2','c3','c4','c5']},
index=[2,3,4,5])
print(df1.head())
# print('\n')
# print(df2.head())
result1 = pd.concat([df1,df2])
# print(result1,'\n')
result2 = pd.concat([df1,df2], ignore_index= True)
# print(result2)
result3 = pd.concat([df1,df2],axis=1)
# print(result3,'\n')
result3_in = pd.concat([df1,df2],axis=1,join='inner')
# print(result3_in)
sr1 = pd.Series(['e0','e1','e2','e3'], name='e')
sr2 = pd.Series(['f0','f1','f2'],name='f',index=[3,4,5])
sr3 = pd.Series(['g0','g1','g2','g3'],name='g')
result4 = pd.concat([df1,sr1],axis=1)
result5 = | pd.concat([df2,sr2],axis=1,sort=True) | pandas.concat |
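# A small additional example (not from the original): sr3 was created above but never used;
# concatenating it with sr1 along axis=1 lines the two Series up on their shared 0-3 index,
# giving a 4x2 DataFrame with columns 'e' and 'g'.
result6 = pd.concat([sr1, sr3], axis=1)
# print(result6)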
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.api.types import is_numeric_dtype
sns.set()
def _dtypes(df: pd.DataFrame) -> pd.DataFrame:
"""A private function that returns the data types of all the columns in a dataframe
Parameters
----------
df : pd.DataFrame
The dataframe to be analyzed
Returns
-------
pd.DataFrame
A dataframe of columns with their respective data types
"""
return | pd.DataFrame(df.dtypes) | pandas.DataFrame |
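# Hedged usage sketch (toy frame; the column names are illustrative and not part of this
# module's API):
if __name__ == "__main__":
    _demo = pd.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [0.5, 1.5]})
    print(_dtypes(_demo))  # one row per column holding its dtype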
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, make_scorer
# In[2]:
def calculate_pearson(df):
correlations = {}
numerical_features = df.select_dtypes(exclude = ["object"]).columns
numerical_features = numerical_features.drop("cod_municipio")
for i in numerical_features:
corr = stats.pearsonr(df[i], df['ideb'])[0]
correlations[i] = corr
df_corr = pd.DataFrame(list(correlations.items()), columns=['feature', 'correlation_with_ideb'])
df_corr = df_corr.dropna()
return df_corr
# In[3]:
def calculate_categorical_correlation(df):
categorical_features = df.select_dtypes(include = ["object"]).columns
return categorical_features
# # Load the CSV data from each group member
# ### Alexandre's data
# In[4]:
path = '../../data/'
# In[5]:
# Early years (anos iniciais) data
alexandre_inicio_2015 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2015_ai.csv')
alexandre_inicio_2017 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2017_ai.csv')
# Final years (anos finais) data
alexandre_final_2015 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2015_af.csv')
alexandre_final_2017 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2017_af.csv')
# ### Lidia's data
# In[6]:
# Early years (anos iniciais) data
lidia_inicio_2007 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2007_ai.csv')
lidia_inicio_2009 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2009_ai.csv')
lidia_inicio_2011 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2011_ai.csv')
lidia_inicio_2013 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2013_ai.csv')
lidia_inicio_2015 = | pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2015_ai.csv') | pandas.read_csv |
from data import CITIES, BUSINESSES, USERS, REVIEWS, TIPS, CHECKINS
import random
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
from IPython.display import display
import csv
def recommend(user_id=None, business_id=None, city=None, n=10):
"""
Returns n recommendations as a list of dicts.
Optionally takes in a user_id, business_id and/or city.
A recommendation is a dictionary in the form of:
{
business_id:str
stars:str
name:str
city:str
adress:str
}
"""
if not city:
city = random.choice(CITIES)
return random.sample(BUSINESSES[city], n)
def get_rating(REVIEWS, user_id, business_id):
for city in CITIES:
for review in range(len(REVIEWS[city])):
reviewdict = REVIEWS[city][review]
if reviewdict['user_id'] == user_id and reviewdict['business_id'] == business_id:
rating = (REVIEWS[city][review]['stars'])
return rating
return np.nan
def pivot_ratings(REVIEWS, CITIES, USERS, BUSINESSES):
users = []
businesses = []
for city in CITIES:
for user in USERS[city]:
users.append(user['user_id'])
for business in BUSINESSES[city]:
businesses.append(business['business_id'])
pivot_data = pd.DataFrame(np.nan, columns=users, index=businesses, dtype=float)
for x in pivot_data:
for y in pivot_data.index:
            pivot_data.loc[y, x] = get_rating(REVIEWS, x, y)
return pivot_data
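# Hedged alternative sketch (not wired into anything above): when every review dict carries
# 'user_id', 'business_id' and 'stars' -- the keys get_rating() already relies on -- the same
# utility matrix can be built in one pass with pivot_table instead of the nested
# get_rating() loops.
def pivot_ratings_fast(REVIEWS, CITIES):
    all_reviews = [review for city in CITIES for review in REVIEWS[city]]
    reviews_df = pd.DataFrame(all_reviews)
    return reviews_df.pivot_table(index='business_id', columns='user_id', values='stars')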
def pivot_ratings_city(city, REVIEWS, CITIES, USERS, BUSINESSES):
users = []
businesses = []
for user in USERS[city]:
users.append(user['user_id'])
for business in BUSINESSES[city]:
businesses.append(business['business_id'])
pivot_data = pd.DataFrame(np.nan, columns=users, index=businesses, dtype=float)
for x in pivot_data:
for y in pivot_data.index:
            pivot_data.loc[y, x] = get_rating(REVIEWS, x, y)
return pivot_data
def pivot_ratings_friends(user_id, REVIEWS, CITIES, USERS, BUSINESSES):
"""
Return matrix containing all ratings of friends on businesses they have been to
"""
users = find_friends(user_id, USERS)
users.append(user_id)
businesses = []
for friend in users:
friends_businesses = check_businesses(friend, REVIEWS)
for business in friends_businesses:
businesses.append(business)
businesses = list(set(businesses))
pivot_data = | pd.DataFrame(np.nan, columns=users, index=businesses, dtype=float) | pandas.DataFrame |
import os
import glob
import click
from .batch_manager import BatchManager, Job
from .config_json_parser import ClpipeConfigParser, GLMConfigParser
import logging
import sys
from .error_handler import exception_handler
import site
path1 = sys.path
path1.insert(0, site.USER_SITE)
sys.path = path1
import nibabel
sys.path = path1[1:]
import numpy
import nipype.interfaces.fsl as fsl # fsl
import nipype.pipeline.engine as pe # pypeline engine
from nipype.interfaces.utility import IdentityInterface
import nibabel as nib
import pandas
import re
import clpipe.postprocutils
import numpy as np
@click.command()
@click.argument('subjects', nargs=-1, required=False, default=None)
@click.option('-config_file', type=click.Path(exists=True, dir_okay=False, file_okay=True), default=None, required = True,
help='Use a given configuration file.')
@click.option('-glm_config_file', type=click.Path(exists=True, dir_okay=False, file_okay=True), default=None, required = True,
help='Use a given GLM configuration file.')
@click.option('-drop_tps', type=click.Path(exists=True, dir_okay=False, file_okay=True), default=None, required = False,
help='Drop timepoints csv sheet')
@click.option('-submit', is_flag=True, default=False, help='Flag to submit commands to the HPC.')
@click.option('-batch/-single', default=True,
help='Submit to batch, or run in current session. Mainly used internally.')
@click.option('-debug', is_flag=True, default=False,
help='Print detailed processing information and traceback for errors.')
def glm_setup(subjects = None, config_file=None, glm_config_file = None,
submit=False, batch=True, debug = None, drop_tps = None):
if not debug:
sys.excepthook = exception_handler
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.DEBUG)
config = ClpipeConfigParser()
config.config_updater(config_file)
glm_config = GLMConfigParser(glm_config_file)
task = glm_config.config['GLMSetupOptions']['TaskName']
if not subjects:
subjectstring = "ALL"
sublist = [o.replace('sub-', '') for o in os.listdir(glm_config.config['GLMSetupOptions']['TargetDirectory'])
if os.path.isdir(os.path.join(glm_config.config['GLMSetupOptions']['TargetDirectory'], o)) and 'sub-' in o]
else:
subjectstring = " , ".join(subjects)
sublist = subjects
submission_string = '''glm_setup -config_file={config} -glm_config_file={glm_config} -single {debug} {sub} '''
if debug:
debug_string = '-debug'
else:
debug_string = ''
if batch:
batch_manager = BatchManager(config.config['BatchConfig'], glm_config.config['GLMSetupOptions']['LogDirectory'])
batch_manager.update_mem_usage(glm_config.config['GLMSetupOptions']['MemoryUsage'])
batch_manager.update_time(glm_config.config['GLMSetupOptions']['TimeUsage'])
batch_manager.update_nthreads(glm_config.config['GLMSetupOptions']['NThreads'])
batch_manager.update_email(config.config["EmailAddress"])
for sub in sublist:
sub_string_temp = submission_string.format(
config=os.path.abspath(config_file),
glm_config=os.path.abspath(glm_config_file),
sub=sub,
debug = debug_string
)
batch_manager.addjob(Job("GLM_Setup" + sub, sub_string_temp))
if submit:
batch_manager.createsubmissionhead()
batch_manager.compilejobstrings()
batch_manager.submit_jobs()
else:
batch_manager.createsubmissionhead()
batch_manager.compilejobstrings()
click.echo(batch_manager.print_jobs())
else:
for sub in subjects:
logging.info('Running Subject ' + sub)
_glm_prep(glm_config, sub, task, drop_tps)
def _glm_prep(glm_config, subject, task, drop_tps):
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
search_string = os.path.abspath(
os.path.join(glm_config.config["GLMSetupOptions"]['TargetDirectory'], "sub-" + subject, "**",
"*" + glm_config.config["GLMSetupOptions"]['TargetSuffix']))
subject_files = glob.glob(search_string, recursive=True)
glm_setup = pe.Workflow(name='glm_setup')
glm_setup.base_dir = os.path.join(glm_config.config["GLMSetupOptions"]['WorkingDirectory'], "sub-"+subject)
input_node = pe.Node(IdentityInterface(fields=['in_file', 'out_file', 'mask_file']), name='input')
strip = pe.Node(fsl.BinaryMaths(operation = 'mul'), name="mask_apply")
resample = pe.Node(fsl.FLIRT(apply_xfm = True,
reference = glm_config.config["GLMSetupOptions"]["ReferenceImage"],
uses_qform = True),
name="resample")
glm_setup.connect(input_node, 'out_file', resample, 'out_file')
if drop_tps is not None:
drop_tps_data = pandas.read_csv(drop_tps)
drop = pe.Node(fsl.ExtractROI(), name = "drop_tps")
drop.inputs.t_min = 0
glm_setup.connect(input_node, "in_file", drop, "in_file")
if glm_config.config["GLMSetupOptions"]["ApplyFMRIPREPMask"]:
if drop_tps is None:
glm_setup.connect([(input_node, strip, [('in_file', 'in_file'),
('mask_file', 'operand_file')])])
else:
glm_setup.connect(drop, "roi_file", strip, "in_file")
glm_setup.connect(input_node, "mask_file", strip, "operand_file")
if glm_config.config["GLMSetupOptions"]["SUSANSmoothing"]:
sus = pe.Node(fsl.SUSAN(), name="susan_smoothing")
sus.inputs.brightness_threshold = glm_config.config["GLMSetupOptions"]['SUSANOptions']['BrightnessThreshold']
sus.inputs.fwhm = glm_config.config["GLMSetupOptions"]['SUSANOptions']['FWHM']
if glm_config.config["GLMSetupOptions"]["ApplyFMRIPREPMask"]:
glm_setup.connect(strip, 'out_file', sus, 'in_file')
elif drop_tps is not None:
glm_setup.connect(drop, 'roi_file', sus, 'in_file')
else:
glm_setup.connect(input_node, 'in_file', sus, 'in_file')
if glm_config.config["GLMSetupOptions"]["SUSANSmoothing"]:
glm_setup.connect(sus, 'smoothed_file', resample, 'in_file')
elif glm_config.config["GLMSetupOptions"]["ApplyFMRIPREPMask"]:
glm_setup.connect(strip, 'out_file', resample, 'in_file')
elif drop_tps is not None:
glm_setup.connect(drop, 'roi_file', resample, 'in_file')
else:
glm_setup.connect(input_node, 'in_file', resample, 'in_file')
if drop_tps is not None:
drop_tps_data = pandas.read_csv(drop_tps)
for image in subject_files:
if task is None or 'task-' + task + '_' in image:
logging.info('Processing ' + image)
confounds = None
try:
if glm_config.config["GLMSetupOptions"]['PrepareConfounds']:
confound_file = _find_confounds(glm_config, image)
if not os.path.exists(confound_file):
raise ValueError("Cannot find confound file: "+ confound_file)
confounds = pandas.read_table(confound_file, dtype="float", na_values="n/a")
if len(glm_config.config["GLMSetupOptions"]['Confounds']) > 0:
cons_re = [re.compile(regex_wildcard(co)) for co in glm_config.config["GLMSetupOptions"]['Confounds']]
target_cols = []
for reg in cons_re:
logging.debug(str([reg.match(col).group() for col in confounds.columns if reg.match(col) is not None]))
target_cols.extend([reg.match(col).group() for col in confounds.columns if reg.match(col) is not None])
logging.debug("Confound Columns " + str(target_cols))
confounds_mat = confounds[target_cols]
if len(glm_config.config["GLMSetupOptions"]['ConfoundsQuad']) > 0:
cons_re = [re.compile(regex_wildcard(co)) for co in glm_config.config["GLMSetupOptions"]['ConfoundsQuad']]
target_cols = []
for reg in cons_re:
target_cols.extend(
[reg.match(col).group() for col in confounds.columns if reg.match(col) is not None])
logging.debug("Quad Columns " + str(target_cols))
confounds_quad_mat = confounds[target_cols]
confounds_quad_mat.rename(columns =lambda x: x+"_quad", inplace = True)
confounds_quad_mat = confounds_quad_mat**2
confounds_mat = | pandas.concat([confounds_mat,confounds_quad_mat],axis=1, ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mplc
import matplotlib.pyplot as plt
from bokeh import mpl
# Generate the pandas dataframe
data = np.random.multivariate_normal([0, 0], [[1, 2], [2, 20]], size=100)
data = | pd.DataFrame(data, columns=["X", "Y"]) | pandas.DataFrame |