prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import os
import json
import pprint
import numpy as np
import pandas as pd
import tensorflow as tf
from commonmodels2.models.model import ModelBase
from commonmodels2.log.logger import Logger
class DataContainer():
def __init__(self):
self._keystore = {}
def __str__(self):
return pprint.pformat(self._keystore, indent=2)
def get_item(self, key):
if key in self.get_keys():
return self._keystore[key]
else:
raise KeyError("provided key '{}' not present in {}".format(key, type(self).__name__))
def set_item(self, key, obj):
if type(key) != str:
raise ValueError("provided key must be string type")
self._keystore[key] = obj
def get_keys(self):
return self._keystore.keys()
def save(self, out_dir_path):
Logger.getInst().info("Saving data container. This may take a while...")
str_dict = self._make_json_compatible(self._keystore, out_dir_path)
out_json_str = json.dumps(str_dict, sort_keys=True, indent=3, separators=(',', ': '))
with open(os.path.join(out_dir_path, 'data_info.json'), 'w') as outfile:
outfile.write(out_json_str)
# Helper method for _make_json_compatible(). This function allows values
# stored in a dict object associated with certain keys to be processed differently.
# For example, all lists will be stored as json-compatible lists inside the output
# json object after calling _make_json_compatible(), but this function allows certain
# lists to be stored differently
@classmethod
def _special_dict_key_handler(cls, key, value, out_file_path):
replace_val = value
if key == 'predictions':
if not os.path.isdir(os.path.dirname(out_file_path)):
os.makedirs(os.path.dirname(out_file_path))
out_file_path += '.csv'
out_preds = np.array(value)
num_cols = 1 if out_preds.ndim == 1 else out_preds.shape[1]
pred_cols = ['Predictions']
if num_cols > 1:
pred_cols = ['Prediction_%d'%(i) for i in range(num_cols)]
preds_df = | pd.DataFrame(data=out_preds, columns=pred_cols) | pandas.DataFrame |
"""
concavity_automator comports multiple scripts automating concavity constraining method for landscape
"""
import lsdtopytools as lsd
import numpy as np
import numba as nb
import pandas as pd
from matplotlib import pyplot as plt
import sys
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import math
from lsdtopytools.numba_tools import travelling_salesman_algortihm, remove_outliers_in_drainage_divide
import random
import matplotlib.gridspec as gridspec
from multiprocessing import Pool, current_process
from scipy import spatial,stats
import numba as nb
import copy
from pathlib import Path
import pylab as pl
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
def norm_by_row(A):
"""
Subfunction used to vectorised normalisation of disorder by max of row using apply_along_axis function
B.G
"""
return A/A.max()
def norm_by_row_by_range(A):
"""
Subfunction used to vectorised normalisation of disorder by range of concavity using apply_along_axis function
B.G
"""
return (A - A.min())/(A.max() - A.min())
def numfmt(x, pos):
"""
Plotting subfunction to automate tick formatting from metres to kilometres
B.G
"""
s = '{:d}'.format(int(round(x / 1000.0)))
return s
def get_best_bit_and_err_from_Dstar(thetas, medD, fstD, thdD):
"""
Takes ouput from concavity calculation to calculate the best-fit theta and its error
"""
# Calculating the index of minimum medium disorder to get the best-fit
index_of_BF = np.argmin(medD)
# Getting the Dstar value of the best-fit
dstar_val = medD[index_of_BF]
# Getting the acutal best-fit
BF = thetas[index_of_BF]
# Preformatting 2 arrays for calculating the error: I am just interested by the first half for the first error and the second for the second
A = np.copy(fstD)
A[index_of_BF+1:] = 9999
B = np.copy(fstD)
B[:index_of_BF] = 9999
# calculating the error by extracting the closest theta with a Dstar close to the median best fit ones
err = ( thetas[np.abs(A - dstar_val).argmin()] , thetas[np.abs(B - dstar_val).argmin()] )
# REturning a tuple with [0] being the best fit and [1] another tuple f error
return BF,err
def process_basin(ls, **kwargs):
"""
Main function processing the concavity. It looks a bit convoluted but it is required for clean multiprocessing.
Takes at least one argument: ls, which is a list of arguments
ls[0] -> the number of the basin (heavily used by automatic multiprocessing)
ls[1] -> the X coordinate of the basin outlet
ls[2] -> the Y coordinate of the basin outlet
ls[3] -> area_threshold used for the analysis
ls[4] -> prefix befor the number of the basin to read the file input
Also takes option kwargs argument:
ignore_numbering: jsut use the prefix as name for the DEM
extension: if your extension is not .tif, you can give it here WITHOUT THE DOT
overwrite_dem_name: used if you want to use thefunction from outside the automations: you need to provide the dem name WITH THE EXTENSION
"""
number = ls[0]
X = ls[1]
Y = ls[2]
area_threshold = ls[3]
prefix = ls[4]
print("Processing basin ", number, " with proc ", current_process())
if("ignore_numbering" not in kwargs):
kwargs["ignore_numbering"] = False
if("extension" not in kwargs):
kwargs["extension"] = "tif"
if("n_tribs_by_combo" not in kwargs):
kwargs["n_tribs_by_combo"] = 4
if(kwargs["ignore_numbering"] == True):
name = prefix
else:
name = prefix + "%s"%(number)
if(kwargs["precipitation_raster"] == ""):
precipitation = False
else:
precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
dem_name ="%s.%s"%(name,kwargs["extension"])
if("overwrite_dem_name" in kwargs):
dem_name = kwargs["overwrite_dem_name"]
MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# Extracting basins
if(precipitation):
MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
else:
MD.CommonFlowRoutines()
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
print("River extracted")
MD.DefineCatchment( method="from_XY", X_coords = [X], Y_coords = [Y], coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels")
MD.df_base_river.to_feather("%s_rivers.feather"%(name))
print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, kwargs["n_tribs_by_combo"])
print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
np.save("%s_disorder_tot.npy"%(name), results)
XY = MD.cppdem.query_xy_for_each_basin()["0"]
tdf = pd.DataFrame(XY)
tdf.to_feather("%s_XY.feather"%(name))
return 0
def theta_quick_constrain_single_basin(MD,X_coordinate_outlet = 0, Y_coordinate_outlet = 0, area_threshold = 1500):
"""
Main function processing the concavity. It looks a bit convoluted but it is required for clean multiprocessing.
Takes at least one argument: ls, which is a list of arguments
ls[0] -> the number of the basin (heavily used by automatic multiprocessing)
ls[1] -> the X coordinate of the basin outlet
ls[2] -> the Y coordinate of the basin outlet
ls[3] -> area_threshold used for the analysis
ls[4] -> prefix befor the number of the basin to read the file input
Also takes option kwargs argument:
ignore_numbering: jsut use the prefix as name for the DEM
extension: if your extension is not .tif, you can give it here WITHOUT THE DOT
overwrite_dem_name: used if you want to use thefunction from outside the automations: you need to provide the dem name WITH THE EXTENSION
"""
# number = ls[0]
# X = ls[1]
# Y = ls[2]
# area_threshold = ls[3]
# prefix = ls[4]
# print("Processing basin ", number, " with proc ", current_process())
# if("ignore_numbering" not in kwargs):
# kwargs["ignore_numbering"] = False
# if("extension" not in kwargs):
# kwargs["extension"] = "tif"
# if("n_tribs_by_combo" not in kwargs):
# kwargs["n_tribs_by_combo"] = 4
# if(kwargs["ignore_numbering"] == True):
# name = prefix
# else:
# name = prefix + "%s"%(number)
# if(kwargs["precipitation_raster"] == ""):
# precipitation = False
# else:
# precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
# dem_name ="%s.%s"%(name,kwargs["extension"])
# if("overwrite_dem_name" in kwargs):
# dem_name = kwargs["overwrite_dem_name"]
# MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# # Extracting basins
# if(precipitation):
# MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
# else:
# MD.CommonFlowRoutines()
# print("Experimental function (Gailleton et al., submitted), if it crashes restart from a clean LSDDEM object with only the flow routines processed.")
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
# print("River pre-extracted")
MD.DefineCatchment( method="from_XY", X_coords = X_coordinate_outlet, Y_coords = Y_coordinate_outlet, coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
# print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
# print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("DEBUG::You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels \n")
# MD.df_base_river.to_feather("%s_rivers.feather"%(name))
# print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, 4)
# print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
# pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
# np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
# np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
# print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
# np.save("%s_disorder_tot.npy"%(name), results)
# XY = MD.cppdem.query_xy_for_each_basin()["0"]
# tdf = pd.DataFrame(XY)
# tdf.to_feather("%s_XY.feather"%(name))
# print("\n\n")
try:
from IPython.display import display, Markdown, Latex
todusplay = r"""
**Thanks for constraning** $\theta$ with the disorder algorithm from _Mudd et al., 2018_ and _Gailleton et al, submitted_.
Keep in mind that it is not straightforward and that the "best fit" we suggest is most of the time the "least worst" value maximising the collinearity in $\chi$ space.
Especially in large, complex basin, several $\theta$ actually fit different areas and the best fit is just a try to make everyone happy where it is not necessarily possible.
$\theta$ constraining results:
median $\theta$ | $1^{st}$ Q | $3^{rd}$ Q
--- | --- | ---
%s | %s | %s
"""%(round(np.nanmedian(all_disorder[0]),3), round(np.nanpercentile(all_disorder[0],25),3), round(np.nanpercentile(all_disorder[0],75),3))
display(Markdown(todusplay))
except:
pass
return all_disorder
def get_median_first_quartile_Dstar(ls):
"""
Function which post-process results from one analysis to return the median and first quartile curve of all best-fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D* for ", ls)
name_to_load = ls
# loading the file containng ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def get_median_first_quartile_Dstar_r(ls):
"""
Function which post-process results from one analysis to return the median and first quartile curve of all best-fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D*_r for ", ls)
name_to_load = ls
# loading the file containng ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row_by_range,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def plot_single_theta(ls, **kwargs):
"""
For a multiple analysis on the same DEM this plot the global with each basins colored by D^*
Need post-processing function to pre-analyse the ouputs.
The layout of this function might seems a bit convoluted, but that's making multiprocessing easy, as they take time to plot
param
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and col corresponding to the main raster and potting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
print("plotting D*_r for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and col corresponding to the main raster and potting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_r_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_by_range_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
def plot_min_D_star_map(ls, **kwargs):
"""
For a multiple analysis on the same DEM this plot the global with each basins colored by D^*
Need post-processing function to pre-analyse the ouputs.
The layout of this function might seems a bit convoluted, but that's making multiprocessing easy, as they take time to plot
param
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
df_theta = | pd.read_csv(prefix + "all_raster_names.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import glob
from pmdarima.arima import ndiffs
from pandas.tseries.offsets import QuarterBegin, QuarterEnd
from .hand_select import hand_select
import pandas_datareader.data as web
import xlrd, csv
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook, InvalidFileException
def set_date_as_index(df):
df.columns = [name.lower() for name in df.columns]
df["date"] = pd.to_datetime(df["date"])
df.set_index("date", inplace=True)
return df
def make_float(df):
df = df.replace(".", np.nan)
df = df.astype(float)
return df
def read_files(paths, fillna=True):
csv_list = []
xls_list = []
for path in paths:
csv_files = glob.glob(path + "/*.csv")
xls_files = glob.glob(path + "/*.xls")
for elt in csv_files:
df = pd.read_csv(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
csv_list.append(df)
for elt in xls_files:
try:
df = pd.read_excel(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
xls_files.append(df)
except Exception:
pass
return csv_list, xls_list
def make_stationary(df):
df = hand_select(df)
df = df.dropna()
columns = df.columns
for name in columns:
x = df[name].values
d_kpss = ndiffs(x, test='kpss')
d_adf = ndiffs(x, test='adf')
d_pp = ndiffs(x, test='pp')
d_ = max(d_kpss, d_adf, d_pp)
if d_ > 0:
new_name = name + '_diff' + str(d_)
if d_ == 1:
df[new_name] = df[name].diff()
elif d_ == 2:
df[new_name] = df[name].diff().diff()
elif d_ > 2:
raise ValueError('High order differentiation')
else:
raise Exception('Some thing is wrong')
df = df.drop(columns=[name])
return df
def open_xls_as_xlsx(filename):
# first open using xlrd
book = xlrd.open_workbook(filename)
index = 0
nrows, ncols = 0, 0
while nrows * ncols == 0:
sheet = book.sheet_by_index(index)
nrows = sheet.nrows
ncols = sheet.ncols
index += 1
# prepare a xlsx sheet
book1 = Workbook()
sheet1 = book1.active
for row in range(1, nrows):
for col in range(1, ncols):
sheet1.cell(row=row, column=col).value = sheet.cell_value(row, col)
return book1
def read_data(path, sheet=False, header='infer'):
file_format = path.split('.')[-1]
if 'msci' in path:
header = 6
if sheet is False:
# if file_format == 'csv':
# df = pd.read_csv(path, header=header)
# elif file_format == 'xls':
# df = open_xls_as_xlsx(path)
# else:
try:
df = pd.read_excel(path, header=header, engine='openpyxl')
except Exception:
try:
df = open_xls_as_xlsx(path)
except Exception as e:
try:
df = pd.read_csv(path, header=header)
except Exception as e:
raise Exception(e)
else:
try:
# excel_file = pd.ExcelFile(path)
# assert sheet in excel_file.sheet_names
# df = excel_file.parse(sheet, header=header)
df = pd.read_excel(path, header=header, engine='openpyxl', sheet_name=sheet)
except Exception:
raise Exception("Can not read sheet")
df.columns = [name.lower() for name in df.columns]
if 'year2' in df.columns:
drop_columns = ['year2']
else:
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
first_valid = df.iloc[:, 1].first_valid_index()
last_valid = df.iloc[:, 1].last_valid_index() + 1
df = df.iloc[first_valid:last_valid]
df.columns = df.columns.str.replace('.', '_')
df.columns = df.columns.str.replace(' ', '_')
df.columns = df.columns.str.replace('__', '_')
return df
def make_monthly_date(df, offset=True):
datetime = pd.to_datetime(
(
df['year'].astype(int) * 100
+ df['month'].astype(int)
).astype(str),
format='%Y%m'
)
if offset:
datetime += pd.tseries.offsets.MonthBegin(1)
else:
datetime = datetime
df['date'] = datetime
df.drop(columns=['year', 'month'], inplace=True)
df.set_index('date', inplace=True)
df.columns = [elt + '_monthly' for elt in df.columns]
return df
def make_quarterly_date(df, offset=True):
df['year'] = df['year'].str.lower()
df['year'] = df['year'].str.replace(r'(q\d)-(\d+)', r'\2-\1')
if offset:
# Bug that quarterbegin is March 01
df['date'] = pd.to_datetime(df['year'])\
+ pd.tseries.offsets.DateOffset(days=1)\
+ pd.tseries.offsets.QuarterBegin(1, startingMonth=1)
else:
df['date'] = pd.to_datetime(df['year'])
df.drop(columns=['year'], inplace=True)
df.set_index('date', inplace=True)
# Manually shift because of QuarterBegin bug
df.columns = [elt + '_quarterly' for elt in df.columns]
df = df.dropna()
return df
def make_daily_date(df):
datetime = pd.to_datetime(
(
df['year'].astype(int) * 10000
+ df['month'].astype(int) * 100
+ df['day'].astype(int)
).astype(str),
format='%Y%m%d'
)
df['date'] = datetime
df.drop(columns=['year', 'month', 'day'], inplace=True)
df.set_index('date', inplace=True)
df.columns = [elt + '_daily' for elt in df.columns]
return df
# If date of low frequency data is specified, assume It is announced
# before the start of the market
# If not specified, assume it is announced after the market is closed
def daily_data(df, freq, offset=True, fill_method='ffill'):
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
if freq.lower() == 'monthly':
try:
df = make_monthly_date(df, offset=offset)
except Exception:
print("set monthly date as index")
datetime = | pd.to_datetime(df['date']) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# # Predicting Student Admissions with Neural Networks in Keras
# In this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:
# - GRE Scores (Test)
# - GPA Scores (Grades)
# - Class rank (1-4)
#
# The dataset originally came from here: http://www.ats.ucla.edu/
#
# ## Loading the data
# To load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:
# - https://pandas.pydata.org/pandas-docs/stable/
# - https://docs.scipy.org/
# In[11]:
# Importing pandas and numpy
import pandas as pd
import numpy as np
# Reading the csv file into a pandas DataFrame
data = | pd.read_csv('student_data.csv') | pandas.read_csv |
# -*- encoding: utf-8 -*-
# @Time : 2020/12/17
# @Author : <NAME>
# @email : <EMAIL>
# UPDATE
# @Time : 2020/12/17
# @Author : <NAME>
# @email : <EMAIL>
import json
import math
import re
import shutil
from collections import OrderedDict
from typing import Union, Tuple
import torch
from .metrics import Metric
def _line_width():
try:
# if we're in an interactive ipython notebook, hardcode a longer width
__IPYTHON__
return 128
except NameError:
return shutil.get_terminal_size((88, 24)).columns
def float_formatter(f: Union[float, int]) -> str:
"""
Format a float as a pretty string.
"""
if f != f:
# instead of returning nan, return "" so it shows blank in table
return ""
if isinstance(f, int):
# don't do any rounding of integers, leave them alone
return str(f)
if f >= 1000:
# numbers > 1000 just round to the nearest integer
s = f'{f:.0f}'
else:
# otherwise show 4 significant figures, regardless of decimal spot
s = f'{f:.4g}'
# replace leading 0's with blanks for easier reading
# example: -0.32 to -.32
s = s.replace('-0.', '-.')
if s.startswith('0.'):
s = s[1:]
# Add the trailing 0's to always show 4 digits
# example: .32 to .3200
if s[0] == '.' and len(s) < 5:
s += '0' * (5 - len(s))
return s
def round_sigfigs(x: Union[float, 'torch.Tensor'], sigfigs=4) -> float:
"""
Round value to specified significant figures.
:param x: input number
:param sigfigs: number of significant figures to return
:returns: float number rounded to specified sigfigs
"""
x_: float
if isinstance(x, torch.Tensor):
x_ = x.item()
else:
x_ = x # type: ignore
try:
if x_ == 0:
return 0
return round(x_, -math.floor(math.log10(abs(x_)) - sigfigs + 1))
except (ValueError, OverflowError) as ex:
if x_ in [float('inf'), float('-inf')] or x_ != x_: # inf or nan
return x_
else:
raise ex
def _report_sort_key(report_key: str) -> Tuple[str, str]:
"""
Sorting name for reports.
Sorts by main metric alphabetically, then by task.
"""
# if metric is on its own, like "f1", we will return ('', 'f1')
# if metric is from multitask, we denote it.
# e.g. "convai2/f1" -> ('convai2', 'f1')
# we handle multiple cases of / because sometimes teacher IDs have
# filenames.
fields = report_key.split("/")
main_key = fields.pop(-1)
sub_key = '/'.join(fields)
return (sub_key or 'all', main_key)
def nice_report(report) -> str:
"""
Render an agent Report as a beautiful string.
If pandas is installed, we will use it to render as a table. Multitask
metrics will be shown per row, e.g.
.. code-block:
f1 ppl
all .410 27.0
task1 .400 32.0
task2 .420 22.0
If pandas is not available, we will use a dict with like-metrics placed
next to each other.
"""
if not report:
return ""
try:
import pandas as pd
use_pandas = True
except ImportError:
use_pandas = False
sorted_keys = sorted(report.keys(), key=_report_sort_key)
output: OrderedDict[Union[str, Tuple[str, str]], float] = OrderedDict()
for k in sorted_keys:
v = report[k]
if isinstance(v, Metric):
v = v.value()
if use_pandas:
output[_report_sort_key(k)] = v
else:
output[k] = v
if use_pandas:
line_width = _line_width()
df = pd.DataFrame([output])
df.columns = | pd.MultiIndex.from_tuples(df.columns) | pandas.MultiIndex.from_tuples |
import pandas as pd # Пакет для работы с таблицами
import numpy as np # Пакет для работы с векторами и матрицами
# Из библиотеки для работы с текстами вытащим
# методы для предобработки и модели
from gensim import corpora, models
from gensim.models.callbacks import PerplexityMetric
# Пара дополнительных комбинаторных штук для картинок
from collections import defaultdict
import itertools
# Пакет, чтобы делать глубокие копии
import copy
# Пакет для работы со специфичными для питона данными
import pickle
# Косинусная метрика для рассчёта расстояний
from scipy.spatial.distance import cosine
import json
import pickle
path = '/Users/dmitrys/Yandex.Disk.localized/top_russian_music/comments/vk/'
print("Reading data")
with open(path + "vk_comments_full_1", 'rb') as f:
comments = pickle.load(f)
with open(path + "vk_comments_full_2", 'rb') as f:
comments.extend(pickle.load(f))
data = | pd.DataFrame(comments) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import toml
import librosa
import pandas as pd
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
sys.path.append(os.getcwd())
from audio.metrics import SI_SDR, STOI, WB_PESQ, NB_PESQ, REGISTERED_METRICS
def calculate_metric(noisy_file, clean_file, sr=16000, metric_type="STOI", pre_load=False):
# get noisy, clean
if pre_load == False:
noisy, _ = librosa.load(noisy_file, sr=sr)
clean, _ = librosa.load(clean_file, sr=sr)
else:
noisy = noisy_file
clean = clean_file
assert len(noisy) == len(clean)
# get metric score
if metric_type in ["SI_SDR"]:
return SI_SDR(noisy, clean)
elif metric_type in ["STOI"]:
return STOI(noisy, clean, sr=sr)
elif metric_type in ["WB_PESQ"]:
return WB_PESQ(noisy, clean)
elif metric_type in ["NB_PESQ"]:
return NB_PESQ(noisy, clean)
def compute_metric(noisy_files, clean_files, metrics, n_folds=1, n_jobs=8, pre_load=False):
for metric_type, _ in metrics.items():
assert metric_type in REGISTERED_METRICS
split_num = len(noisy_files) // n_folds
score = []
for n in range(n_folds):
metric_score = Parallel(n_jobs=n_jobs)(
delayed(calculate_metric)(
noisy_file,
clean_file,
sr=8000 if metric_type in ["NB_PESQ"] else 16000,
metric_type=metric_type,
pre_load=pre_load,
)
for noisy_file, clean_file in tqdm(
zip(
noisy_files[n * split_num : (n + 1) * split_num],
clean_files[n * split_num : (n + 1) * split_num],
)
)
)
score.append(np.mean(metric_score))
metrics[metric_type] = np.mean(score)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="compute_metrics")
parser.add_argument("-c", "--config", required=True, type=str, help="Config (*.toml).")
args = parser.parse_args()
# get dataset path
dataset_path = os.path.join(os.getcwd(), "dataset_csv")
# get set path
train_path = os.path.join(dataset_path, "train.csv")
valid_path = os.path.join(dataset_path, "valid.csv")
test_path = os.path.join(dataset_path, "test.csv")
# get train files
train_files = | pd.read_csv(train_path) | pandas.read_csv |
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
import plotly_express
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# Read in data
batter_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_bat_data.csv")
del batter_data['Age']
print(len(batter_data))
print(batter_data.head())
pitcher_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_pitch_data.csv")
del pitcher_data['Age']
print(len(pitcher_data))
print(pitcher_data.head())
salary_data = pd.read_csv("~/Desktop/MLB_FA/Data/salary_data.csv")
print(len(salary_data))
injury_data = pd.read_csv("~/Desktop/MLB_FA/Data/injury_data_use.csv")
# Check for whether there is overlap between injury data and the salary data players
# injury_data_players = injury_data['Player'].unique()
# mutual = salary_data[salary_data['Player'].isin(injury_data_players)] # 945 out of 1135 players included
# excl = salary_data[~salary_data['Player'].isin(injury_data_players)]
# print(len(excl['Player'].unique())) # 129 unique players injury data omitted; use mlb.com trans for these
# Define inflation
def npv(df, rate):
r = rate
df['Salary'] = pd.to_numeric(df['Salary'])
df['AAV'] = salary_data['Salary'] / df['Years']
df['NPV'] = 0
df['NPV'] = round(df['AAV'] * (1 - (1 / ((1 + r) ** df['Years']))) / r, 2)
return df
salary_data = npv(salary_data, 0.05)
# Lagged metrics to see if there is carryover value / value in continuity
class Metrics:
def lagged_batter(df):
df['WAR'] = | pd.to_numeric(df['WAR']) | pandas.to_numeric |
import os
import sys
import time
import subprocess
import webbrowser
from collections import defaultdict
import pandas as pd
import numpy as np
from numpy import floor, ceil
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(path)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
from validate_ez import *
from validate_full import *
from validate_pf import *
import data
"""
<Class>
<Methods> <description>
<related methods>
Utilities
generate_token builds a "token" dictionary to pass between the loops with all values necessary for the current EIN
update_df_row extracts an updated row from a token to write back to the dataframe
get_components handles cross-referencing equation components (denoted by the * in the display)
load_data loads the dataframe at the start of the program
load_saved loads previously saved entries and merges them
sort_data sorts data by the specified criteria on program start
build_url builds Foundation Center URLs from the current EINs data
pdf_opener opens Foundation Center PDFs in default browser
get_form_crosswalk connects column names to IRS form 990 lines for user reference
process_token_storage handles the restoring of the EIN placed on hold in order to go back one
Display
display main display loop
display_static_header program info at top between the *** lines
display_form_header header with info on the current form, including eins completed/remaining - also has the validation reason from the current EIN
display_ein_header header with info on the current row, rev/ass/exp both current and prior, plus tax period
display_failed_equations manages the failed equation display between the --- lines
failure_entries used for updating the failed equations section on the fly
UserInput
get_explanation asks user to explain what was done
get_user initial setup questions
col_viewer helper function for formatting output
user_input main menu, and methods related to it:
ui_fixed
ui_checked
ui_ignore
ui_skip
ui_reset
ui_quit
ui_options
ui_done_so_far
ui_view
ui_view_group
ui_modify_row
ui_get_fix
CoreFixer
__init__ environmental variables
start base method called to begin the validation tool
dataframe_loop manages the loop through every row of the dataframe
row_loop manages the loop through a single row until fixed
end handles wrap-up and saving
"""
def wprint(text, chars=99, indent=True, ident=4):
#adds auto wrapping and optional indenting to printed text
first_line = True
lines = []
while len(text) > 0:
if not first_line:
eol = chars - ident
else:
eol = chars
curr_line_rev = text[eol-1::-1]
if '-' in curr_line_rev and '+' in curr_line_rev:
split_pt = min(curr_line_rev.find('+'), curr_line_rev.find('-'))
elif '-' in curr_line_rev:
split_pt = curr_line_rev.find('-')
elif '+' in curr_line_rev:
split_pt = curr_line_rev.find('+')
else:
split_pt = 0
if len(text) >= eol:
line = text[:eol-split_pt]
else:
line = text
text = text[len(line):].lstrip()
if not first_line:
line = ' '*ident + line
else:
first_line = False
lines.append(line)
for line in lines:
print(line)
class Utilities():
def generate_token(self, ein, row):
if row['SOURCE'].split('.')[0].upper().endswith('EZ'):
source = 'EZ'
elif row['SOURCE'].split('.')[0].upper().endswith('990'):
source = 'Full'
elif row['SOURCE'].split('.')[0].upper().endswith('PF'):
source = 'PF'
else:
source = ''
token = {'current_ein': ein,
'irs_source' : source,
'original_row': row,
'modified_row': row.copy(),
'exit': False,
'write': None,
'changes':{}}
return token
def update_df_row(self, token):
self.df.loc[token['current_ein']] = token['modified_row'] #write result back to original
def get_components(self):
"""
Loads the methods for validation calculations from Core, then builds the cross references
needed to track relations between equations (e.g. one equation that fails has TOTREV2 in it,
so the program finds all equations with TOTREV2 and displays those also)
"""
#creates an instance of the validation code from the main program
validator = {'EZ':ValidateEZ(),
'Full':ValidateFull(),
'PF':ValidatePF()}
components = {}
validators = {}
equations = {}
validators['EZ'] = {'validate_ez_totrev': validator['EZ'].ez_validate_totrev,
'validate_ez_netinc': validator['EZ'].ez_validate_netinc,
'validate_ez_ass_eoy': validator['EZ'].ez_validate_ass_eoy,
'validate_ez_saleothn': validator['EZ'].ez_validate_saleothn,
'validate_ez_netincfndrsng': validator['EZ'].ez_validate_netincfndrsng,
'validate_ez_grprof': validator['EZ'].ez_validate_grprof
}
validators['Full'] = {'validate_fu_netrent':validator['Full'].full_validate_netrent,
'validate_fu_netgnls':validator['Full'].full_validate_netgnls,
'validate_fu_netincfndrsng':validator['Full'].full_validate_netincfndrsng,
'validate_fu_netincgaming':validator['Full'].full_validate_netincgaming,
'validate_fu_grprof':validator['Full'].full_validate_grprof,
'validate_fu_totrev2':validator['Full'].full_validate_totrev2,
'validate_fu_fundbal':validator['Full'].full_validate_fundbal
}
validators['PF'] = {'validate_pf_p1totrev':validator['PF'].pf_validate_p1totrev,
'validate_pf_p1totexp':validator['PF'].pf_validate_p1totexp,
'validate_pf_p1excrev':validator['PF'].pf_validate_p1excrev,
'validate_pf_p2tinvsc':validator['PF'].pf_validate_p2tinvsc,
'validate_pf_p14tnadj':validator['PF'].pf_validate_p14tnadj,
'validate_pf_p14tqdis':validator['PF'].pf_validate_p14tqdis,
'validate_pf_p14tasvl':validator['PF'].pf_validate_p14tasvl,
'validate_pf_p14t4942':validator['PF'].pf_validate_p14t4942,
'validate_pf_p14tendw':validator['PF'].pf_validate_p14tendw,
'validate_pf_p14ttsup':validator['PF'].pf_validate_p14ttsup,
'validate_pf_p14tpsup':validator['PF'].pf_validate_p14tpsup,
'validate_pf_p14tginv':validator['PF'].pf_validate_p14tginv
}
for form in ['EZ', 'Full', 'PF']:
equations[form] = {'_'.join(k.split('_')[2:]).upper(): v(None, fixer_display=True) for k, v in validators[form].items()}
components[form] = {k: v.translate(str.maketrans({kk: None for kk in '+-=()'})).split() for k, v in equations[form].items()}
all_cols = {}
multiples = {}
cross_components_dict = {}
for form in ['EZ', 'Full', 'PF']:
all_cols[form] = [col for cols in components[form].values() for col in cols] #all_cols[form] = ['col1', 'col2', 'col3'...]
multiples[form] = []
uniques = list(set(all_cols[form])) #unique list of every column in components keys or values
for col in uniques:
if all_cols[form].count(col) > 1:
multiples[form].append(col)
#reverse the components dict, so every name that shows up more than once is a key, while the key it shows up in in components is a value
cross_components_dict[form] = defaultdict(lambda: [])
for m in multiples[form]:
for k, v in components[form].items():
if m in v:
cross_components_dict[form][m].append(k)
cross_components_dict[form] = dict(cross_components_dict[form])
#pointers for ease of reference later
components['FU'] = components['Full']
cross_components_dict['FU'] = cross_components_dict['Full']
multiples['FU'] = multiples['Full']
validators['FU'] = validators['Full']
equations['FU'] = equations['Full']
return components, cross_components_dict, multiples, equations, validators
def load_data(self):
df = pd.read_csv(os.path.join(self.path, 'validation', 'failures', '{}_{}_validate.csv'.format(self.form.lower(), self.year)), dtype=self.make_strings)
df.set_index('EIN', inplace=True)
num_cols = df.select_dtypes(include=[np.number]).columns
for c in num_cols:
df[c.lower()+'_adjusted_by'] = 0 #initialize empty adjustment tracking columns
df['validated_by'] = ''
df['EXPLANATION'] = ''
if self.load_df is not None:
try:
df.loc[self.load_df.index] = self.load_df
except KeyError:
#if the main process has been rerun and new validation output is present, but the fixes already applied haven't been deleted from the folder, they will fail to assign
print('\nWARNING: It appears the existing fixes have already been integrated into the main process, as some of the EINs in the saved fix content are no longer marked for validating.')
_ = input('Press any key to continue WITHOUT loading the fixes. They will be OVERWRITTEN the next time you save. Otherwise please exit the program to resolve the conflict.')
failures = [c for c in df.columns if c.startswith('validate_') and 'adjusted' not in c]
df.loc[:,failures] = df[failures].fillna(0)
#filters out any entries that have already been solved (does not filter any entries if there is no loaded content)
#stopped filtering this way to avoid overwriting content on save changes only, now just used to calculate start length.
df['INCLUDE'] = ((df['VALIDATION_STATE'] != 2) & ((abs(df[failures]) >= self.threshold).any(1) | (df['VALIDATION_REASON'].isin(self.sort_criteria) & ~df['VALIDATION_STATE'].isin([1,3]))))
# df = df[df['INCLUDE']]
start_len = len(df[df['INCLUDE']])
del df['INCLUDE']
df['VALIDATION_REASON'] = pd.Categorical(df['VALIDATION_REASON'], self.sort_criteria, ordered=True)
self.df = df
self.num_cols = num_cols
self.failures = failures
self.start_len = start_len
def load_saved(self, form):
#form: (str) co, pc, pf
load_df = pd.read_csv(os.path.join(path, 'validation', 'fixes', '{}_{}_validate.csv'.format(form.lower(), self.year)), dtype=self.make_strings)
load_df.set_index('EIN', inplace=True)
tracking_df = pd.read_csv(os.path.join(path, 'final output', '{}_change_tracker.csv'.format(form.lower())), dtype={'EIN':'str'})
tracking_df.set_index('EIN', inplace=True)
load_df = load_df.merge(tracking_df, how='outer', left_index=True, right_index=True, indicator=True)
assert((load_df['_merge'] == 'both').all(axis=0)), 'Warning, EINs do not line up when merging existing changes with change tracking.'
load_df.drop('_merge', axis=1, inplace=True)
load_df['VALIDATION_REASON'] = pd.Categorical(load_df['VALIDATION_REASON'], self.sort_criteria, ordered=True)
print('{} previously completed rows loaded.'.format(len(load_df)))
return load_df
def sort_data(self):
df = self.df
form = self.form
if form == 'PF':
rev = 'P1TOTREV'
else:
rev = 'TOTREV2'
self.df = self.df.sort_values(['VALIDATION_REASON', rev], ascending=[True, False])
def build_url(self, filename, taxperp):
if self.form == 'PF':
pf = 'pf'
else:
pf = ''
if not taxperp:
return 'http://990s.foundationcenter.org/990{}_pdf_archive/{}/{}/{}.pdf'.format(pf, filename[:3], filename.split('_')[0], filename)
else:
fn_split = filename.split('_')
pfn = '_'.join([ fn_split[0], str(taxperp).rstrip('.0'), fn_split[2] ])
return 'http://990s.foundationcenter.org/990{}_pdf_archive/{}/{}/{}.pdf'.format(pf, filename[:3], filename.split('_')[0], pfn)
def pdf_opener(self, filename, taxperp=None, url=None):
if not url:
url = self.build_url(filename, taxperp)
webbrowser.open_new_tab(url)
def get_form_crosswalk(self):
"""
For connecting column names to 990 form locations
"""
return {'FULL': pd.Series(['Part VIII Line 1h', 'Part VIII Line 2g (A)', 'Part VIII Line 3A', 'Part VIII Line 4A', 'Part VIII Line 5A', 'Part VIII Line 6a (i)',
'Part VIII Line 6a (ii)', 'Sum of Part VIII Line 6a (i) and (ii)', 'Part VIII Line 6b (i)', 'Part VIII Line 6b (ii)',
'Sum of Part VIII Line 6b (i) and (ii)', 'Part VIII Line 6c (i)', 'Part VIII Line 6c (ii)', 'Part VIII Line 6d (A)',
'Part VIII Line 7a (i)', 'Part VIII Line 7b (i)', 'Part VIII Line 7c (i)', 'Part VIII Line 7a (ii)', 'Part VIII Line 7b (ii)',
'Part VIII Line 7c (ii)', 'Part VIII Line 7d', 'Part VIII Line 8a', 'Part VIII Line 8b', 'Part VIII Line 8c', 'Part VIII Line 9a',
'Part VIII Line 9b', 'Part VIII Line 9c', 'Sum of Part VIII Line 8a and 9a', 'Sum of Part VIII Line 8b and 9b',
'Sum of Part VIII Line 8c (A) and 9c (A)', 'Part VIII Line 10a', 'Part VIII Line 10b', 'Part VIII Line 10c (A)',
'Part VIII Line 11e (A)', 'Part VIII Line 12 (A)', 'Part IX Line 5 (A)', 'Part IX Line 7 (A)', 'Part IX Line 10 (A)',
'Part IX Line 11e (A)', 'Part IX Line 25 (A)', 'Part X Line 16 (A)', 'Part X Line 16 (B)', 'Part X Line 20 (B)', 'Part X Line 23 (B)',
'Part X Line 24 (B)', 'Part X Line 26 (A)', 'Part X Line 26 (B)', 'Part X Line 32 (B)', 'Part X Line 33 (B)'],
index=['CONT', 'PROGREV', 'INVINC', 'TXEXMPTBNDSPROCEEDS', 'ROYALTSINC', 'GRSRNTSREAL', 'GRSRNTSPRSNL', 'RENTINC', 'RNTLEXPNSREAL', 'RNTLEXPNSPRSNL',
'RENTEXP', 'RNTLINCREAL', 'RNTLINCPRSNL', 'NETRENT', 'SECUR', 'SALESEXP', 'SALESECN', 'SALEOTHG', 'SALEOTHE', 'SALEOTHN', 'NETGNLS',
'GRSINCFNDRSNG', 'LESSDIRFNDRSNG', 'NETINCFNDRSNG', 'GRSINCGAMING', 'LESSDIRGAMING', 'NETINCGAMING', 'SPEVTG', 'DIREXP', 'FUNDINC',
'INVENTG', 'GOODS', 'GRPROF', 'OTHINC', 'TOTREV2', 'COMPENS', 'OTHSAL', 'PAYTAX', 'FUNDFEES', 'EXPS', 'ASS_BOY', 'ASS_EOY', 'BOND_EOY',
'MRTG_EOY', 'UNSECUREDNOTESEND', 'LIAB_BOY', 'LIAB_EOY', 'RETEARN', 'FUNDBAL'],
),
'EZ': None,
'PF': None}
def process_token_storage(self, token):
"""
Method for when the user has gone back to the previous token and then finished it and needs to restore the delayed one.
"""
self.update_df_row(token) #update the df with the prior token
token = self.token_storage['on_hold'] #extract the token that was placed on hold
self.token_storage['on_hold'] = None #empty the token storage
return token #return the token that was placed on hold and make it the current token
class Display():
def display(self, token):
self.display_static_header()
self.display_form_header(token)
self.display_ein_header(token)
self.display_failed_equations(token)
def display_static_header(self):
os.system('cls' if os.name == 'nt' else 'clear')
line0 = '****************************************'
line1 = 'NCCS Core File Validation Tool v7.2017'
line2 = 'Created by <NAME> (<EMAIL>)'
line_list = [line0, line1, line2, line0, '']
for line in line_list:
print(line.center(self.terminal_width))
def display_form_header(self, token):
form = self.form
num_fixed = self.num_fixed
remaining = self.start_len - num_fixed
ein = token['current_ein']
firm_name = str(token['modified_row']['NAME'])
reason = token['modified_row']['VALIDATION_REASON']
lcol = int(floor(self.terminal_width / 2))
rcol = int(self.terminal_width - lcol)
sides = int(floor(self.terminal_width / 3))
cent = int(self.terminal_width - 2*sides)
line4a = 'Form: {}'.format(form).ljust(sides)
line4c = 'Validation Reasons: {}'.format(', '.join(list(reason))).center(cent)
line4b = 'EINs Completed: {}'.format(num_fixed).rjust(sides)
line5a = 'EIN: {}'.format(ein).ljust(lcol)
line5b = 'EINs Remaining: {}'.format(remaining).rjust(rcol)
line6 = firm_name.center(self.terminal_width)
line7 = '-'*len(firm_name) + '--'
print(line4a+line4c+line4b)
print(line5a+line5b)
print(line6)
print(line7.center(self.terminal_width))
def display_ein_header(self, token):
row = token['modified_row']
source = token['irs_source']
sides = int(floor(self.terminal_width / 3))
cent = int(self.terminal_width - 2*sides)
if source in ['EZ', 'Full']:
current = (float(row['TOTREV2']),float(row['EXPS']), float(row['ASS_EOY']))
prior = (float(row['TOTREVP']), float(row['EXPSP']), float(row['ASS_BOY']))
elif source == 'PF':
current = (float(row['P1TOTREV']), float(row['P1TOTEXP']), float(row['P2TOTAST']))
prior = None
else:
current, prior = None, None
taxper = 'TAX PERIOD: '+row['TAXPER'][:4]+'-'+row['TAXPER'][4:]
print(taxper.center(self.terminal_width))
print('')
if current:
line1 = 'CURRENT: Revenue: {:,.0f}'.format(current[0]).center(sides) + 'Expenses: {:,.0f}'.format(current[1]).center(cent) + 'Assets: {:,.0f}'.format(current[2]).center(sides)
print(line1)
if prior:
line2 = 'PRIOR: Revenue: {:,.0f}'.format(prior[0]).center(sides) + 'Expenses: {:,.0f}'.format(prior[1]).center(cent) + 'Assets: {:,.0f}'.format(prior[2]).center(sides)
print(line2)
if current or prior:
print('')
print(' ' + '-'*int((ceil(self.terminal_width))-2))
print('')
def display_failed_equations(self, token):
row = token['modified_row']
failures = self.failure_entries(token)
for fail in failures.index:
eq = self.equations[fail[9:11].upper()][fail[12:].upper()]
cols = self.components[fail[9:11].upper()][fail[12:].upper()]
for col in cols:
eq = eq.replace(col, col+' [{:,.0f}]'.format(row[col]))
entries = '${:,.0f} = '.format(int(row[fail])) + eq
for multiple in self.multiples[fail[9:11].upper()]:
entries = entries.replace(multiple, multiple+'*')
ident = len(fail[12:])+6
if row[fail] < 0:
ident += 1
wprint(entries, ident=ident)
print('')
if len(failures.index) == 0:
print('No validation failures found.\n'.center(self.terminal_width))
def failure_entries(self, token):
row = token['modified_row']
failures = row.loc[self.failures]
failures = failures[(abs(failures) >= self.threshold)] #display all failure columns
#adds to the display any columns that passed validation, but contain columns that are parts of columns that did fail validation
add_related = []
for fail in failures.index: #e.g. fail='validate_ez_totrev'
fail_components = self.components[fail[9:11].upper()][fail[12:].upper()] #e.g. fail_components=['CONT', 'PRGMSRVREV'...]
for fail_component in fail_components:
if fail_component in self.multiples[fail[9:11].upper()]:
component_sources = self.cross_components[fail[9:11].upper()][fail_component]
for potential in component_sources:
if 'validate_'+fail[9:11].lower()+'_'+potential.lower() not in failures.index:
add_related.append('validate_'+fail[9:11]+'_'+potential.lower())
if len(add_related) > 0:
failures = | pd.concat([failures, row.loc[add_related]]) | pandas.concat |
"""Module to read, check and write a HDSR meetpuntconfiguratie."""
__title__ = "histTags2mpt"
__description__ = "to evaluate a HDSR FEWS-config with a csv with CAW histTags"
__version__ = "0.1.0"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__license__ = "MIT License"
from meetpuntconfig.fews_utilities import FewsConfig, xml_to_dict
from pathlib import Path
import json
import numpy as np
import pandas as pd
import logging
from openpyxl import load_workbook
from openpyxl.styles import Font, PatternFill
import os
import sys
import re
from shapely.geometry import Point
pd.options.mode.chained_assignment = None
def idmap2tags(row, idmap):
"""Add FEWS-locationIds to hist_tags in df.apply() method."""
exloc, expar = row["serie"].split("_", 1)
fews_locs = [
col["internalLocation"]
for col in idmap
if col["externalLocation"] == exloc and col["externalParameter"] == expar
]
if len(fews_locs) == 0:
fews_locs = np.NaN
return fews_locs
def get_validation_attribs(validation_rules, int_pars=None, loc_type=None):
"""Get attributes from validationRules."""
if int_pars is None:
int_pars = [rule["parameter"] for rule in validation_rules]
result = []
for rule in validation_rules:
if "type" in rule.keys():
if rule["type"] == loc_type:
if any(re.match(rule["parameter"], int_par) for int_par in int_pars):
for key, attribute in rule["extreme_values"].items():
if isinstance(attribute, list):
result += [value["attribute"] for value in attribute]
else:
result += [attribute]
elif any(re.match(rule["parameter"], int_par) for int_par in int_pars):
for key, attribute in rule["extreme_values"].items():
if isinstance(attribute, list):
result += [value["attribute"] for value in attribute]
else:
result += [attribute]
return result
def update_hlocs(row, h_locs, mpt_df):
"""Add startdate and enddate op hoofdloc dataframe with df.apply() method."""
loc_id = row.name
start_date = row["STARTDATE"]
end_date = row["ENDDATE"]
if loc_id in h_locs:
start_date = (
mpt_df[mpt_df.index.str.contains(loc_id[0:-1])]["STARTDATE"].dropna().min()
)
end_date = (
mpt_df[mpt_df.index.str.contains(loc_id[0:-1])]["ENDDATE"].dropna().max()
)
return start_date, end_date
def update_date(row, mpt_df, date_threshold):
"""Return start and end-date in df.apply() method."""
int_loc = row["LOC_ID"]
if int_loc in mpt_df.index:
start_date = mpt_df.loc[int_loc]["STARTDATE"].strftime("%Y%m%d")
end_date = mpt_df.loc[int_loc]["ENDDATE"]
if end_date > date_threshold:
end_date = pd.Timestamp(year=2100, month=1, day=1)
end_date = end_date.strftime("%Y%m%d")
else:
start_date = row["START"]
end_date = row["EIND"]
return start_date, end_date
def update_histtag(row, grouper):
"""Assign last histTag to waterstandsloc in df.apply method."""
return next(
(
df.sort_values("total_max_end_dt", ascending=False)["serie"].values[0]
for loc_id, df in grouper
if loc_id == row["LOC_ID"]
),
None,
)
def _sort_validation_attribs(rule):
result = {}
for key, value in rule.items():
if isinstance(value, str):
result[key] = [value]
elif isinstance(value, list):
periods = [val["period"] for val in value]
attribs = [val["attribute"] for val in value]
result[key] = [attrib for _, attrib in sorted(zip(periods, attribs))]
return result
class MeetpuntConfig:
"""Meetpuntconfig class."""
def __init__(self, config_path, log_level="INFO"):
self.paths = dict()
self.fews_config = None
self.location_sets = dict()
self.hist_tags = None
self.hist_tags_ignore = None
self.fixed_sheets = None
self.idmap_files = None
self.idmap_sections = None
self.external_parameters_allowed = None
self.consistency = None
self.parameter_mapping = None
self.validation_rules = None
self.logging = logging
self.hoofdloc = None
self.subloc = None
self.waterstandloc = None
self.mswloc = None
self.mpt_hist_tags = None
self._locs_mapping = dict(
hoofdlocaties="hoofdloc",
sublocaties="subloc",
waterstandlocaties="waterstandloc",
mswlocaties="mswloc",
)
self.logging.basicConfig(level=os.environ.get("LOGLEVEL", log_level))
self._read_config(Path(config_path))
def _read_config(self, config_json):
if config_json.exists():
with open(config_json) as src:
config = json.load(src)
workdir = Path(config_json).parent
else:
self.logging.error(f"{config_json} does not exist")
sys.exit()
# add paths to config
for key, path in config["paden"].items():
path = Path(path)
if not path.is_absolute():
path = workdir.joinpath(path).resolve()
if path.exists():
self.paths[key] = path
else:
if path.suffix == "":
logging.warning(f"{path} does not exist. Folder will be created")
path.mkdir()
else:
self.logging.error(
(
f"{path} does not exist. "
f"Please define existing file "
f"in {config_json}."
)
)
sys.exit()
# add fews_config
self.fews_config = FewsConfig(self.paths["fews_config"])
# add location_sets
for key, value in config["location_sets"].items():
if value in self.fews_config.locationSets.keys():
if "csvFile" in self.fews_config.locationSets[value].keys():
self.location_sets[key] = {
"id": value,
"gdf": self.fews_config.get_locations(value),
}
else:
self.logging.error((f"{key} not a csvFile location-set"))
else:
self.logging.error(
(
f"locationSet {key} specified in {config_json} "
f"not in fews-config"
)
)
# add rest of config
self.idmap_files = config["idmap_files"]
self.idmap_sections = config["idmap_sections"]
self.external_parameters_allowed = config["external_parameters_allowed"]
self.parameter_mapping = config["parameter_mapping"]
self.validation_rules = config["validation_rules"]
self.fixed_sheets = config["fixed_sheets"]
# read consistency df from input-excel
self.consistency = pd.read_excel(
self.paths["consistency_xlsx"], sheet_name=None, engine="openpyxl"
)
self.consistency = {
key: value
for key, value in self.consistency.items()
if key in self.fixed_sheets
}
def _read_hist_tags(self, force=False):
if (not self.hist_tags) or force:
if "hist_tags_csv" in self.paths.keys():
self.logging.info(f"reading histags: {self.paths['hist_tags_csv']}")
dtype_cols = ["total_min_start_dt", "total_max_end_dt"]
self.hist_tags = pd.read_csv(
self.paths["hist_tags_csv"],
parse_dates=dtype_cols,
sep=None,
engine="python",
)
for col in dtype_cols:
if not pd.api.types.is_datetime64_dtype(self.hist_tags[col]):
self.logging.error(
(
f"col '{col}' in '{self.paths['hist_tags_csv']} "
"can't be converted to np.datetime64 format. "
"Check if values are dates."
)
)
sys.exit()
def _read_hist_tags_ignore(self, force=False):
if (not self.hist_tags_ignore) or force:
if "mpt_ignore_csv" in self.paths.keys():
self.logging.info(
f"Reading hist tags to be ingored from "
f"{self.paths['mpt_ignore_csv']}"
)
self.hist_tags_ignore = pd.read_csv(
self.paths["mpt_ignore_csv"], sep=None, header=0, engine="python"
)
elif "histTag_ignore" in self.consistency.keys():
self.hist_tags_ignore = self.consistency["histTag_ignore"]
self.logging.info(
f"Reading hist tags to be ignored from "
f"{self.paths['consistency_xlsx']}"
)
else:
self.logging.error(
(
f"specify a histTag_ignore worksheet in "
f"{self.paths['consistency_xlsx']} or a csv-file "
"in the config-json"
)
)
sys.exit()
self.hist_tags_ignore["UNKNOWN_SERIE"] = self.hist_tags_ignore[
"UNKNOWN_SERIE"
].str.replace("#", "")
def _get_idmaps(self, idmap_files=None):
if not idmap_files:
idmap_files = self.idmap_files
idmaps = [
xml_to_dict(self.fews_config.IdMapFiles[idmap])["idMap"]["map"]
for idmap in idmap_files
]
return [item for sublist in idmaps for item in sublist]
def _read_locs(self):
self.hoofdloc = self.fews_config.get_locations("OPVLWATER_HOOFDLOC")
self.subloc = self.fews_config.get_locations("OPVLWATER_SUBLOC")
self.waterstandloc = self.fews_config.get_locations(
"OPVLWATER_WATERSTANDEN_AUTO"
)
self.mswloc = self.fews_config.get_locations("MSW_STATIONS")
def _update_staff_gauge(self, row):
"""Assign upstream and downstream staff gauges to subloc."""
result = {"HBOV": "", "HBEN": ""}
for key in result.keys():
df = self.waterstandloc.loc[self.waterstandloc["LOC_ID"] == row[key]]
if not df.empty:
result[key] = df["PEILSCHAAL"].values[0]
return result["HBOV"], result["HBEN"]
def hist_tags_to_mpt(self, sheet_name="mpt"):
"""Convert histTag-ids to mpt-ids."""
if self.hist_tags is None:
self._read_hist_tags()
idmaps = self._get_idmaps()
hist_tags_df = self.hist_tags.copy()
hist_tags_df["fews_locid"] = hist_tags_df.apply(
idmap2tags, args=[idmaps], axis=1
)
hist_tags_df = hist_tags_df[hist_tags_df["fews_locid"].notna()]
mpt_hist_tags_df = hist_tags_df.explode("fews_locid").reset_index(drop=True)
self.mpt_hist_tags = mpt_hist_tags_df
mpt_df = pd.concat(
[
mpt_hist_tags_df.groupby(["fews_locid"], sort=False)[
"total_min_start_dt"
].min(),
mpt_hist_tags_df.groupby(["fews_locid"], sort=False)[
"total_max_end_dt"
].max(),
],
axis=1,
)
mpt_df = mpt_df.sort_index(axis=0)
mpt_df.columns = ["STARTDATE", "ENDDATE"]
mpt_df.index.name = "LOC_ID"
kw_locs = list(mpt_df[mpt_df.index.str.contains("KW", regex=False)].index)
h_locs = np.unique(["{}0".format(loc[0:-1]) for loc in kw_locs])
h_locs_missing = [loc for loc in h_locs if loc not in list(mpt_df.index)]
h_locs_df = pd.DataFrame(
data={
"LOC_ID": h_locs_missing,
"STARTDATE": [pd.NaT] * len(h_locs_missing),
"ENDDATE": [pd.NaT] * len(h_locs_missing),
}
)
h_locs_df = h_locs_df.set_index("LOC_ID")
mpt_df = pd.concat([mpt_df, h_locs_df], axis=0)
mpt_df[["STARTDATE", "ENDDATE"]] = mpt_df.apply(
update_hlocs, args=[h_locs, mpt_df], axis=1, result_type="expand"
)
mpt_df = mpt_df.sort_index()
self.consistency["mpt"] = mpt_df
def check_idmap_sections(self, sheet_name="idmap section error"):
"""Check if all KW/OW locations are in the correct section."""
self.consistency[sheet_name] = pd.DataFrame(
columns=[
"bestand",
"externalLocation",
"externalParameter",
"internalLocation",
"internalParameter",
]
)
for idmap, subsecs in self.idmap_sections.items():
for section_type, sections in subsecs.items():
for section in sections:
if section_type == "KUNSTWERKEN":
prefix = "KW"
if section_type == "WATERSTANDLOCATIES":
prefix = "OW"
if section_type == "MSWLOCATIES":
prefix = "(OW|KW)"
pattern = fr"{prefix}\d{{6}}$"
idmapping = xml_to_dict(
self.fews_config.IdMapFiles[idmap], **section
)["idMap"]["map"]
idmap_wrong_section = [
idmap
for idmap in idmapping
if not bool(re.match(pattern, idmap["internalLocation"]))
]
if idmap_wrong_section:
section_start = (
section["section_start"]
if "section_start" in section.keys()
else ""
)
section_end = (
section["section_end"]
if "section_end" in section.keys()
else ""
)
self.logging.warning(
(
f"{len(idmap_wrong_section)} "
f"internalLocations not {prefix}XXXXXX "
f"between {section_start} and {section_end} "
f"in {idmap}."
)
)
df = pd.DataFrame(idmap_wrong_section)
df["sectie"] = section_start
df["bestand"] = idmap
self.consistency[sheet_name] = pd.concat(
[self.consistency[sheet_name], df], axis=0
)
def check_missing_hist_tags(self, sheet_name="histTags noMatch"):
"""Check if hisTags are missing in config."""
if self.hist_tags_ignore is None:
self._read_hist_tags_ignore()
if self.hist_tags is None:
self._read_hist_tags()
hist_tags_df = self.hist_tags.copy()
idmaps = self._get_idmaps()
hist_tags_df["fews_locid"] = self.hist_tags.apply(
idmap2tags, args=[idmaps], axis=1
)
hist_tags_no_match_df = hist_tags_df[hist_tags_df["fews_locid"].isna()]
hist_tags_no_match_df = hist_tags_no_match_df[
~hist_tags_no_match_df["serie"].isin(self.hist_tags_ignore["UNKNOWN_SERIE"])
]
hist_tags_no_match_df = hist_tags_no_match_df.drop("fews_locid", axis=1)
hist_tags_no_match_df.columns = ["UNKNOWN_SERIE", "STARTDATE", "ENDDATE"]
hist_tags_no_match_df = hist_tags_no_match_df.set_index("UNKNOWN_SERIE")
self.consistency[sheet_name] = hist_tags_no_match_df
if not self.consistency[sheet_name].empty:
self.logging.warning(
"{} histTags not in idMaps".format(len(self.consistency[sheet_name]))
)
else:
self.logging.info("all histTags in idMaps")
def check_ignored_hist_tags(
self, sheet_name="histTags ignore match", idmap_files=["IdOPVLWATER"]
):
"""Check if ignored histTags do match with idmap."""
if self.hist_tags_ignore is None:
self._read_hist_tags_ignore()
if self.hist_tags is None:
self._read_hist_tags()
hist_tags_opvlwater_df = self.hist_tags.copy()
idmaps = self._get_idmaps(idmap_files=idmap_files)
hist_tags_opvlwater_df["fews_locid"] = self.hist_tags.apply(
idmap2tags, args=[idmaps], axis=1
)
hist_tags_opvlwater_df = hist_tags_opvlwater_df[
hist_tags_opvlwater_df["fews_locid"].notna()
]
hist_tag_ignore_match_df = self.hist_tags_ignore[
self.hist_tags_ignore["UNKNOWN_SERIE"].isin(hist_tags_opvlwater_df["serie"])
]
hist_tag_ignore_match_df = hist_tag_ignore_match_df.set_index("UNKNOWN_SERIE")
self.consistency[sheet_name] = hist_tag_ignore_match_df
if not self.consistency[sheet_name].empty:
self.logging.warning(
(
f"{len(self.consistency[sheet_name])} "
r"histTags should not be in histTags ignore."
)
)
else:
self.logging.info("hisTags ignore list consistent with idmaps")
def check_double_idmaps(self, sheet_name="idmaps double"):
"""Check if identical idmaps are doubled."""
self.consistency[sheet_name] = pd.DataFrame(
columns=[
"bestand",
"externalLocation",
"externalParameter",
"internalLocation",
"internalParameter",
]
)
for idmap_file in self.idmap_files:
idmaps = self._get_idmaps(idmap_files=[idmap_file])
idmap_doubles = [idmap for idmap in idmaps if idmaps.count(idmap) > 1]
if len(idmap_doubles) > 0:
idmap_doubles = list(
{
idmap["externalLocation"]: idmap for idmap in idmap_doubles
}.values()
)
df = pd.DataFrame(
idmap_doubles,
columns=[
"internalLocation",
"externalLocation",
"internalParameter",
"externalParameter",
],
)
df["bestand"] = idmap_file
self.consistency[sheet_name] = pd.concat(
[self.consistency[sheet_name], df], axis=0
)
self.logging.warning(
"{} double idmap(s) in {}".format(len(idmap_doubles), idmap_file)
)
else:
self.logging.info("No double idmaps in {}".format(idmap_file))
def check_missing_pars(self, sheet_name="pars missing"):
"""Check if internal parameters in idmaps are missing in paramters.xml."""
config_parameters = list(
self.fews_config.get_parameters(dict_keys="parameters").keys()
)
idmaps = self._get_idmaps()
id_map_parameters = [id_map["internalParameter"] for id_map in idmaps]
params_missing = [
parameter
for parameter in id_map_parameters
if parameter not in config_parameters
]
if len(params_missing) == 0:
self.logging.info("all internal paramters are in config")
else:
self.logging.warning(
"{} parameter(s) in idMaps are missing in config".format(
len(params_missing)
)
)
self.consistency[sheet_name] = pd.DataFrame({"parameters": params_missing})
# self.consistency[sheet_name] = self.consistency[sheet_name].set_index(
# "parameters"
# )
def check_hloc_consistency(self, sheet_name="hloc error"):
"""Check if all sublocs of same hloc have consistent parameters."""
if "xy_ignore" in self.consistency.keys():
xy_ignore_df = self.consistency["xy_ignore"]
else:
xy_ignore_df = pd.DataFrame({"internalLocation": [], "x": [], "y": []})
if self.hoofdloc is None:
self._read_locs()
hloc_errors = {
"LOC_ID": [],
"SUB_LOCS": [],
"LOC_NAME": [],
"GEOMETRY": [],
"SYSTEEM": [],
"RAYON": [],
"KOMPAS": [],
}
grouper = self.subloc.groupby("PAR_ID")
par_dict = {
"LOC_ID": [],
"LOC_NAME": [],
"X": [],
"Y": [],
"ALLE_TYPES": [],
"START": [],
"EIND": [],
"SYSTEEM": [],
"RAYON": [],
"KOMPAS": [],
}
for loc_id, gdf in grouper:
caw_code = loc_id[2:-2]
errors = dict.fromkeys(
["LOC_NAME", "GEOMETRY", "SYSTEEM", "RAYON", "KOMPAS"], False
)
fields = dict.fromkeys(par_dict.keys(), None)
fields["LOC_ID"] = loc_id
loc_names = np.unique(
gdf["LOC_NAME"]
.str.extract(pat=f"([A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*)")
.values
)
if not len(loc_names) == 1:
errors["LOC_NAME"] = ",".join(loc_names)
else:
fields["LOC_NAME"] = loc_names[0]
if any([re.match(loc, loc_id) for loc in xy_ignore_df["internalLocation"]]):
fields["X"], fields["Y"] = next(
[row["x"], row["y"]]
for index, row in xy_ignore_df.iterrows()
if re.match(row["internalLocation"], loc_id)
)
else:
geoms = gdf["geometry"].unique()
if not len(geoms) == 1:
errors["GEOMETRY"] = ",".join(
[f"({geom.x} {geom.y})" for geom in geoms]
)
else:
fields["X"] = geoms[0].x
fields["Y"] = geoms[0].y
all_types = list(gdf["TYPE"].unique())
all_types.sort()
fields["ALLE_TYPES"] = "/".join(all_types)
fields["START"] = gdf["START"].min()
fields["EIND"] = gdf["EIND"].max()
for attribuut in ["SYSTEEM", "RAYON", "KOMPAS"]:
vals = gdf[attribuut].unique()
if not len(vals) == 1:
errors[attribuut] = ",".join(vals)
else:
fields[attribuut] = vals[0]
if None not in fields.values():
for key, value in fields.items():
par_dict[key].append(value)
if any(errors.values()):
hloc_errors["LOC_ID"].append(loc_id)
hloc_errors["SUB_LOCS"].append(",".join(gdf["LOC_ID"].values))
for key, value in errors.items():
if value is False:
value = ""
hloc_errors[key].append(value)
self.consistency[sheet_name] = pd.DataFrame(hloc_errors)
if self.consistency[sheet_name].empty:
self.logging.info("no consistency errors. Hlocs rewritten from sublocs")
par_gdf = pd.DataFrame(par_dict)
columns = list(self.hoofdloc.columns)
drop_cols = [
col
for col in self.hoofdloc.columns
if (col in par_gdf.columns) & (not col == "LOC_ID")
]
drop_cols = drop_cols + ["geometry"]
self.hoofdloc = self.hoofdloc.drop(drop_cols, axis=1)
self.hoofdloc = par_gdf.merge(self.hoofdloc, on="LOC_ID")
self.hoofdloc["geometry"] = self.hoofdloc.apply(
(lambda x: Point(float(x["X"]), float(x["Y"]))), axis=1
)
self.hoofdloc = self.hoofdloc[columns]
else:
self.logging.warning(
"{} Errors in consistency hlocs".format(
len(self.consistency[sheet_name])
)
)
self.logging.warning(
(
"Hoofdlocaties will only be re-written "
"when consistency errors are resolved"
)
)
def check_expar_errors_intloc_missing(
self, expar_sheet="exPar error", intloc_sheet="intLoc missing"
):
"""Check on wrong external parameters and missing internal locations."""
expars_allowed = self.external_parameters_allowed
if self.hoofdloc is None:
self._read_locs()
ex_par_errors = {
"internalLocation": [],
"locationType": [],
"exParError": [],
"types": [],
"FQ": [],
"I.X": [],
"IX.": [],
"SS./SM.": [],
}
int_loc_missing = []
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
for int_loc, loc_group in idmap_df.groupby("internalLocation"):
errors = dict.fromkeys(["I.X", "IX.", "FQ", "SS./SM."], False)
ex_pars = np.unique(loc_group["externalParameter"].values)
ex_pars_gen = [re.sub(r"\d", ".", ex_par) for ex_par in ex_pars]
if int_loc in self.hoofdloc["LOC_ID"].values:
loc_properties = self.hoofdloc[self.hoofdloc["LOC_ID"] == int_loc]
loc_type = "hoofdloc"
elif int_loc in self.subloc["LOC_ID"].values:
loc_properties = self.subloc[self.subloc["LOC_ID"] == int_loc]
loc_type = "subloc"
regexes = ["HR.$"]
elif int_loc in self.waterstandloc["LOC_ID"].values:
loc_type = "waterstandloc"
elif int_loc in self.mswloc["LOC_ID"].values:
loc_type = "mswloc"
else:
loc_type = None
int_loc_missing += [int_loc]
if loc_type in ["hoofdloc", "subloc"]:
all_types = loc_properties["ALLE_TYPES"].values[0].split("/")
all_types = [item.lower() for item in all_types]
elif loc_type == "waterstandloc":
all_types = ["waterstandloc"]
if loc_type == "subloc":
sub_type = self.subloc[self.subloc["LOC_ID"] == int_loc]["TYPE"].values[
0
]
regexes += [
j
for i in [
values
for keys, values in expars_allowed.items()
if keys in all_types
]
for j in i
]
regexes += list(dict.fromkeys(regexes))
ex_par_error = [
ex_par
for ex_par in ex_pars
if not any(
[
regex.match(ex_par)
for regex in [re.compile(rex) for rex in regexes]
]
)
]
if sub_type == "schuif":
if not any(
[ex_par for ex_par in ex_pars_gen if ex_par in ["SS.", "SM."]]
):
errors["SS./SM."] = True
if any(
[
ex_par
for ex_par in ex_pars_gen
if ex_par in ["I.B", "I.H", "I.L"]
]
):
if not any(
[
ex_par
for ex_par in ex_pars_gen
if ex_par in ["IB.", "IH.", "IL."]
]
):
errors["IX."] = True
elif any(
[
ex_par
for ex_par in ex_pars_gen
if ex_par in ["IB.", "IH.", "IL."]
]
):
errors["I.X"] = True
if "FQ." in ex_pars_gen:
if not any(
[
ex_par
for ex_par in ex_pars_gen
if ex_par in ["IB.", "IH.", "IL.", "I.B", "I.H", "I.L"]
]
):
errors["FQ"] = True
elif loc_type == "hoofdloc":
regexes = ["HS.$", "QR.$", "QS.$", "WR", "WS"]
ex_par_error = [
ex_par
for ex_par in ex_pars
if not any(
[
regex.match(ex_par)
for regex in [re.compile(rex) for rex in regexes]
]
)
]
else:
ex_par_error = []
if len(ex_par_error) > 0 | any(errors.values()):
ex_par_errors["internalLocation"].append(int_loc)
ex_par_errors["locationType"].append(loc_type)
ex_par_errors["exParError"].append(",".join(ex_par_error))
ex_par_errors["types"].append(",".join(all_types))
for key, value in errors.items():
ex_par_errors[key].append(value)
self.consistency[expar_sheet] = pd.DataFrame(ex_par_errors)
self.consistency[intloc_sheet] = pd.DataFrame(
{"internalLocation": int_loc_missing}
)
if len(self.consistency[expar_sheet]) == 0:
self.logging.info("geen ExPar errors")
else:
self.logging.warning(
"{} locaties met ExPar errors".format(
len(self.consistency[expar_sheet])
)
)
if len(self.consistency[intloc_sheet]) == 0:
self.logging.info("All internal locations are in locationSets")
else:
self.logging.warning(
"{} Internal locations are not in locationSets.".format(
len(self.consistency[intloc_sheet])
)
)
def check_expar_missing(self, sheet_name="exPar missing"):
"""Check if external paramters are missing on locations."""
ex_par_missing = {
"internalLocation": [],
"exPars": [],
"QR": [],
"QS": [],
"HS": [],
}
if self.hoofdloc is None:
self._read_locs()
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
for index, row in self.hoofdloc.iterrows():
missings = dict.fromkeys(["QR", "QS", "HS"], False)
int_loc = row["LOC_ID"]
loc_group = next(
(
df
for loc, df in idmap_df.groupby("internalLocation")
if loc == int_loc
),
pd.DataFrame(),
)
if not loc_group.empty:
ex_pars = np.unique(loc_group["externalParameter"].values)
ex_pars_gen = [re.sub(r"\d", ".", ex_par) for ex_par in ex_pars]
else:
ex_pars = []
ex_pars_gen = []
if not ("HS." in ex_pars_gen):
missings["HS"] = True
if not ("QR." in ex_pars_gen):
missings["QR"] = True
if not ("QS." in ex_pars_gen):
missings["QS"] = True
if any(missings.values()):
ex_par_missing["internalLocation"].append(int_loc)
ex_par_missing["exPars"].append(",".join(ex_pars))
for key, value in missings.items():
ex_par_missing[key].append(value)
self.consistency[sheet_name] = pd.DataFrame(ex_par_missing)
if len(self.consistency[sheet_name]) == 0:
self.logging.info("No ExPar missing")
else:
self.logging.warning(
"{} Locations with ExPar missing".format(
len(self.consistency[sheet_name])
)
)
def check_exloc_intloc_consistency(self, sheet_name="exLoc error"):
"""Check if external locations are consistent with internal locations."""
ex_loc_errors = {"internalLocation": [], "externalLocation": []}
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
for loc_group in idmap_df.groupby("externalLocation"):
int_loc_error = []
ex_loc = loc_group[0]
int_locs = np.unique(loc_group[1]["internalLocation"].values)
if len(ex_loc) == 3:
if not bool(re.match("8..$", ex_loc)):
int_loc_error = [
int_loc
for int_loc in int_locs
if not bool(re.match(f"...{ex_loc}..$", int_loc))
]
else:
for loc_type in ["KW", "OW"]:
int_locs_select = [
int_loc
for int_loc in int_locs
if bool(re.match(f"{loc_type}.", int_loc))
]
if (
len(
np.unique([int_loc[:-1] for int_loc in int_locs_select])
)
> 1
):
int_loc_error += list(int_locs_select)
if len(ex_loc) == 4:
if not bool(re.match(".8..$", ex_loc)):
int_loc_error += [
int_loc
for int_loc in int_locs
if not bool(re.match(f"..{ex_loc}..$", int_loc))
]
else:
for loc_type in ["KW", "OW"]:
int_locs_select = [
int_loc
for int_loc in int_locs
if bool(re.match(f"{loc_type}.", int_loc))
]
if (
len(
np.unique([int_loc[:-1] for int_loc in int_locs_select])
)
> 1
):
int_loc_error += list(int_locs_select)
if "exLoc_ignore" in self.consistency.keys():
if (
int(ex_loc)
in self.consistency["exLoc_ignore"]["externalLocation"].values
):
int_loc_error = [
int_loc
for int_loc in int_loc_error
if int_loc
not in self.consistency["exLoc_ignore"][
self.consistency["exLoc_ignore"]["externalLocation"]
== int(ex_loc)
]["internalLocation"].values
]
for int_loc in int_loc_error:
ex_loc_errors["internalLocation"].append(int_loc)
ex_loc_errors["externalLocation"].append(ex_loc)
self.consistency[sheet_name] = | pd.DataFrame(ex_loc_errors) | pandas.DataFrame |
from multiprocessing import cpu_count
import numba as nb
import numexpr as ne
import numpy as np
import pandas as pd
from typing import Tuple, Union, List, Callable, Iterable
EPS = 1.0e-7
def matrix_balancing_1d(m: np.ndarray, a: np.ndarray, axis: int) -> np.ndarray:
"""Balances a matrix using a single constraint.
Args:
m (numpy.ndarray): The matrix (a 2-dimensional ndarray) to be balanced
a (numpy.ndarray): The totals vector (a 1-dimensional ndarray) constraint
axis (int): Direction to constrain (0 = along columns, 1 = along rows)
Return:
numpy.ndarray: A balanced matrix
"""
assert axis in [0, 1], "axis must be either 0 or 1"
assert m.ndim == 2, "`m` must be a two-dimensional matrix"
assert a.ndim == 1, "`a` must be an one-dimensional vector"
assert np.all(m.shape[axis] == a.shape[0]), "axis %d of matrices 'm' and 'a' must be the same." % axis
return _balance(m, a, axis)
def matrix_balancing_2d(m: Union[np.ndarray, pd.DataFrame], a: np.ndarray, b: np.ndarray, totals_to_use: str = 'raise',
max_iterations: int = 1000, rel_error: float = 0.0001,
n_procs: int = 1) -> Tuple[Union[np.ndarray, pd.DataFrame], float, int]:
"""Balances a two-dimensional matrix using iterative proportional fitting.
Args:
m (Union[numpy.ndarray, pandas.DataFrame]): The matrix (a 2-dimensional ndarray) to be balanced. If a DataFrame
is supplied, the output will be returned as a DataFrame.
a (numpy.ndarray): The row totals (a 1-dimensional ndarray) to use for balancing
b (numpy.ndarray): The column totals (a 1-dimensional ndarray) to use for balancing
totals_to_use (str, optional): Defaults to ``'raise'``. Describes how to scale the row and column totals if
their sums do not match. Must be one of ['rows', 'columns', 'average', 'raise'].
- rows: scales the columns totals so that their sums matches the row totals
- columns: scales the row totals so that their sums matches the column totals
- average: scales both row and column totals to the average value of their sums
- raise: raises an Exception if the sums of the row and column totals do not match
max_iterations (int, optional): Defaults to ``1000``. Maximum number of iterations
rel_error (float, optional): Defaults to ``1.0E-4``. Relative error stopping criteria
n_procs (int, optional): Defaults to ``1``. Number of processors for parallel computation. (Not used)
Return:
Tuple[Union[numpy.ndarray, pandas.DataFrame], float, int]: The balanced matrix, residual, and n_iterations
"""
max_iterations = int(max_iterations)
n_procs = int(n_procs)
# Test if matrix is Pandas DataFrame
data_type = ''
m_pd = None
if isinstance(m, pd.DataFrame):
data_type = 'pd'
m_pd = m
m = m_pd.values
if isinstance(a, pd.Series) or isinstance(a, pd.DataFrame):
a = a.values
if isinstance(b, pd.Series) or isinstance(b, pd.DataFrame):
b = b.values
# ##################################################################################
# Validations:
# - m is an MxM square matrix, a and b are vectors of size M
# - totals_to_use is one of ['rows', 'columns', 'average']
# - the max_iterations is a +'ve integer
# - rel_error is a +'ve float between 0 and 1
# - the n_procs is a +'ve integer between 1 and the number of available processors
# ##################################################################################
valid_totals_to_use = ['rows', 'columns', 'average', 'raise']
assert m.ndim == 2 and m.shape[0] == m.shape[1], "m must be a two-dimensional square matrix"
assert a.ndim == 1 and a.shape[0] == m.shape[0], \
"'a' must be a one-dimensional array, whose size matches that of 'm'"
assert b.ndim == 1 and b.shape[0] == m.shape[0], \
"'a' must be a one-dimensional array, whose size matches that of 'm'"
assert totals_to_use in valid_totals_to_use, "totals_to_use must be one of %s" % valid_totals_to_use
assert max_iterations >= 1, "max_iterations must be integer >= 1"
assert 0 < rel_error < 1.0, "rel_error must be float between 0.0 and 1.0"
assert 1 <= n_procs <= cpu_count(), \
"n_procs must be integer between 1 and the number of processors (%d) " % cpu_count()
if n_procs > 1:
raise NotImplementedError("Multiprocessing capability is not implemented yet.")
# Scale row and column totals, if required
a_sum = a.sum()
b_sum = b.sum()
if not np.isclose(a_sum, b_sum):
if totals_to_use == 'rows':
b = np.multiply(b, a_sum / b_sum)
elif totals_to_use == 'columns':
a = np.multiply(a, b_sum / a_sum)
elif totals_to_use == 'average':
avg_sum = 0.5 * (a_sum + b_sum)
a = np.multiply(a, avg_sum / a_sum)
b = np.multiply(b, avg_sum / b_sum)
else:
raise RuntimeError("a and b vector totals do not match.")
initial_error = _calc_error(m, a, b)
err = 1.0
i = 0
while err > rel_error:
if i > max_iterations:
# todo: convert to logger, if possible
print("Matrix balancing did not converge")
break
m = _balance(m, a, 1)
m = _balance(m, b, 0)
err = _calc_error(m, a, b) / initial_error
i += 1
if data_type == 'pd':
new_df = pd.DataFrame(m, index=m_pd.index, columns=m_pd.columns)
return new_df, err, i
else:
return m, err, i
def _balance(matrix: np.ndarray, tot: np.ndarray, axis: int) -> np.ndarray:
"""Balances a matrix using a single constraint.
Args:
matrix (numpy.ndarray): The matrix to be balanced
tot (numpy.ndarray): The totals constraint
axis (int): Direction to constrain (0 = along columns, 1 = along rows)
Return:
numpy.ndarray: The balanced matrix
"""
sc = tot / (matrix.sum(axis) + EPS)
sc = np.nan_to_num(sc) # replace divide by 0 errors from the prev. line
if axis: # along rows
matrix = np.multiply(matrix.T, sc).T
else: # along columns
matrix = np.multiply(matrix, sc)
return matrix
def _calc_error(m, a, b):
row_sum = np.absolute(a - m.sum(1)).sum()
col_sum = np.absolute(b - m.sum(0)).sum()
return row_sum + col_sum
@nb.jit(nb.float64[:, :](nb.float64[:, :], nb.int64))
def _nbf_bucket_round(a_, decimals=0):
a = a_.ravel()
b = np.copy(a)
residual = 0
for i in range(0, len(b)):
b[i] = np.round(a[i] + residual, decimals)
residual += a[i] - b[i]
return b.reshape(a_.shape)
def matrix_bucket_rounding(m: Union[np.ndarray, pd.DataFrame], decimals: int = 0) -> Union[np.ndarray, pd.DataFrame]:
"""Bucket rounds to the given number of decimals.
Args:
m (Union[numpy.ndarray, pandas.DataFrame]): The matrix to be rounded
decimals (int, optional): Defaults to ``0``. Number of decimal places to round to. If decimals is negative, it
specifies the number of positions to the left of the decimal point.
Return:
Union[numpy.ndarray, pandas.DataFrame]: The rounded matrix
"""
# Test if matrix is Pandas DataFrame
data_type = ''
m_pd = None
if isinstance(m, pd.DataFrame):
data_type = 'pd'
m_pd = m
m = m_pd.values
decimals = int(decimals)
# I really can't think of a way to vectorize bucket rounding, so here goes the slow for loop
b = _nbf_bucket_round(m, decimals)
if decimals <= 0:
b = b.astype(np.int32)
if data_type == 'pd':
new_df = pd.DataFrame(b.reshape(m.shape), index=m_pd.index, columns=m_pd.columns)
return new_df
else:
return b.reshape(m.shape)
def split_zone_in_matrix(base_matrix: pd.DataFrame, old_zone: int, new_zones: List[int],
proportions: List[float]) -> pd.DataFrame:
"""Takes a zone in a matrix (as a DataFrame) and splits it into several new zones, prorating affected cells by a
vector of proportions (one value for each new zone). The old zone is removed.
Args:
base_matrix (pandas.DataFrame): The matrix to re-shape
old_zone (int): The original zone to split
new_zones (List[int]): The list of new zones to add
proportions (List[float]): The proportions to split the original zone to. The list must be the same length as
``new_zones`` and sum to 1.0
Returns:
pandas.DataFrame: The re-shaped matrix
"""
assert isinstance(base_matrix, pd.DataFrame), "Base matrix must be a DataFrame"
old_zone = int(old_zone)
new_zones = np.array(new_zones, dtype=np.int32)
proportions = np.array(proportions, dtype=np.float64)
assert len(new_zones) == len(proportions), "Proportion array must be the same length as the new zone array"
assert len(new_zones.shape) == 1, "New zones must be a vector"
assert base_matrix.index.equals(base_matrix.columns), "DataFrame is not a matrix"
assert np.isclose(proportions.sum(), 1.0), "Proportions must sum to 1.0 "
n_new_zones = len(new_zones)
intersection_index = base_matrix.index.drop(old_zone)
new_index = intersection_index
for z in new_zones:
new_index = new_index.insert(-1, z)
new_index = pd.Index(sorted(new_index))
new_matrix = pd.DataFrame(0, index=new_index, columns=new_index, dtype=base_matrix.dtypes.iat[0])
# 1. Copy over the values from the regions of the matrix not being updated
new_matrix.loc[intersection_index, intersection_index] = base_matrix
# 2. Prorate the row corresponding to the dropped zone
# This section (and the next) works with the underlying Numpy arrays, since they handle
# broadcasting better than Pandas does
original_row = base_matrix.loc[old_zone, intersection_index]
original_row = original_row.values[:] # Make a shallow copy to preserve shape of the original data
original_row.shape = 1, len(intersection_index)
proportions.shape = n_new_zones, 1
result = pd.DataFrame(original_row * proportions, index=new_zones, columns=intersection_index)
new_matrix.loc[result.index, result.columns] = result
# 3. Proprate the column corresponding to the dropped zone
original_column = base_matrix.loc[intersection_index, old_zone]
original_column = original_column.values[:]
original_column.shape = len(intersection_index), 1
proportions.shape = 1, n_new_zones
result = pd.DataFrame(original_column * proportions, index=intersection_index, columns=new_zones)
new_matrix.loc[result.index, result.columns] = result
# 4. Expand the old intrazonal
proportions_copy = proportions[:, :]
proportions_copy.shape = 1, n_new_zones
proportions.shape = n_new_zones, 1
intrzonal_matrix = proportions * proportions_copy
intrazonal_scalar = base_matrix.at[old_zone, old_zone]
result = | pd.DataFrame(intrazonal_scalar * intrzonal_matrix, index=new_zones, columns=new_zones) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 20:08:48 2021
@author: jan_c
"""
import pandas as pd
from tkinter import *
from tkinter import filedialog
if __name__ == '__main__':
def frame():
def abrir_archivo():
global archivo
archivo = filedialog.askopenfilename(title="Abrir archivo .xlsx", initialdir="F:/", filetypes=(("Archivo .xlsx", "*.xlsx"), ("Archivo .xls", "*.xls")))
raiz.destroy()
raiz = Tk()
mi_frame = Frame(raiz, width=200, height=60)
mi_frame.pack()
boton = Button(raiz, text="Abrir archivo", command=abrir_archivo)
boton.pack(fill=X)
boton.config(cursor="hand2")
boton.config(bd=4)
boton.config(relief="groove")
raiz.mainloop()
return archivo
archivo = frame()
#Leer archivo de entrada
datos = pd.read_excel(archivo, sheet_name="Resumen de resultados", header=4)
# Se filtran las columnas de interes y se generan los datos ordenados
filtro_fluorescencia = datos.filter(regex = "Fluorescencia") # filtra columnas
datos_f = pd.DataFrame(filtro_fluorescencia)
muestra = | pd.DataFrame(datos["Muestra"]) | pandas.DataFrame |
import os
import pandas as pd
# https://github.com/CSSEGISandData/COVID-19.git
REPOSITORY = "https://raw.githubusercontent.com/CSSEGISandData"
MAIN_FOLDER = "COVID-19/master/csse_covid_19_data/csse_covid_19_time_series"
CONFIRMED_FILE = "time_series_covid19_confirmed_global.csv"
DEATHS_FILE = "time_series_covid19_deaths_global.csv"
RECOVERED_FILE = "time_series_covid19_recovered_global.csv"
CONFIRMED_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, CONFIRMED_FILE)
DEATHS_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, DEATHS_FILE)
RECOVERED_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, RECOVERED_FILE)
def group_data_by_country(df):
df = df.drop(columns=["Lat", "Long"])
df_bycountry = df.groupby("Country/Region").sum()
# summing for all country
df_bycountry.loc["Total"] = df_bycountry.sum(axis=0)
return df_bycountry
def get_data_normalized(df):
# dividing by the sum
maximums = df.iloc[:, -1]
df_normalized = df.div(maximums.to_numpy(), axis=0)
return df_normalized
def get_data_for_sir(df_death, df_recovered, df_confirmed):
df_recovered_or_passed = df_recovered + df_death
df_infected = df_confirmed - df_recovered_or_passed
return df_recovered_or_passed, df_infected
def extract_process_data():
df_confirmed = pd.read_csv(CONFIRMED_PATH)
df_deaths = pd.read_csv(DEATHS_PATH)
df_recovered = pd.read_csv(RECOVERED_PATH)
df_confirmed_by_country = group_data_by_country(df_confirmed)
df_deaths_by_country = group_data_by_country(df_deaths)
df_recovered_by_country = group_data_by_country(df_recovered)
df_recovered_or_passed_by_country, df_infected_by_country = get_data_for_sir(
df_deaths_by_country, df_recovered_by_country, df_confirmed_by_country
)
return (
add_datetime(df_confirmed_by_country),
add_datetime(df_deaths_by_country),
add_datetime(df_recovered_by_country),
add_datetime(df_recovered_or_passed_by_country),
add_datetime(df_infected_by_country),
)
def add_datetime(df):
df.loc["Time"] = | pd.period_range(df.columns[0], df.columns[-1], freq="D") | pandas.period_range |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import xarray as xr
def plot_obs_preds(pred_file, obs_file, site_id, start_date, end_date,
outfile=None, info_dict=None):
df_pred = | pd.read_feather(pred_file) | pandas.read_feather |
from sqlalchemy import true
import FinsterTab.W2020.DataForecast
import datetime as dt
from FinsterTab.W2020.dbEngine import DBEngine
import pandas as pd
import sqlalchemy as sal
import numpy
from datetime import datetime, timedelta, date
import pandas_datareader.data as dr
def get_past_data(self):
"""
Get raw data from Yahoo! Finance for SPY during Great Recession
Store data in MySQL database
:param sources: provides ticker symbols of instruments being tracked
"""
# Assume that date is 2010
now = dt.date(2009, 1, 1) # Date Variables
start = now - timedelta(days=1500) # get date value from 5 years ago
end = now
# data will be a 2D Pandas Dataframe
data = dr.DataReader('SPY', 'yahoo', start, end)
symbol = [3] * len(data) # add column to identify instrument id number
data['instrumentid'] = symbol
data = data.reset_index() # no designated index - easier to work with mysql database
# Yahoo! Finance columns to match column names in MySQL database.
# Column names are kept same to avoid any ambiguity.
# Column names are not case-sensitive.
data.rename(columns={'Date': 'date', 'High': 'high', 'Low': 'low', 'Open': 'open', 'Close': 'close',
'Adj Close': 'adj close', 'Volume': 'volume'}, inplace=True)
data.sort_values(by=['date']) # make sure data is ordered by trade date
# send data to database
# replace data each time program is run
data.to_sql('dbo_paststatistics', self.engine, if_exists=('replace'),
index=False,
dtype={'date': sal.Date, 'open': sal.FLOAT, 'high': sal.FLOAT, 'low': sal.FLOAT,
'close': sal.FLOAT, 'adj close': sal.FLOAT, 'volume': sal.FLOAT})
# Tests the accuracy of the old functions
def accuracy(self):
query = 'SELECT * FROM dbo_algorithmmaster'
algorithm_df = pd.read_sql_query(query, self.engine)
query = 'SELECT * FROM dbo_instrumentmaster'
instrument_master_df = pd.read_sql_query(query, self.engine)
# Changes algorithm code
for code in range(len(algorithm_df)):
# Dynamic range for changing instrument ID starting at 1
for ID in range(1, len(instrument_master_df) + 1):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = %d AND b.instrumentid = %d AND a.algorithmcode = "%s"' % (
ID, ID, algorithm_df['algorithmcode'][code])
df = pd.read_sql_query(query, self.engine)
count = 0
# Calculates accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(len(df)):
absolute_percent_error.append(
abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / len(df)
# return the average percent error calculated above
print("Average percent error for instrument: %d and algorithm: %s " % (ID, algorithm_df['algorithmcode'][code]), average_percent_error)
#print('Algorithm:', algorithm_df['algorithmcode'][code])
#print('instrumentid: %d' % ID, instrument_master_df['instrumentname'][ID - 1])
#print('length of data is:', len(df))
#print('number correct: ', count)
d = len(df)
b = (count / d) * 100
#print('The accuracy is: %.2f%%\n' % b)
# Isolated tests for ARIMA as we where trying to determine why it was so accurate
def arima_accuracy(self):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = 1 AND b.instrumentid = 1 AND a.algorithmcode = "ARIMA"'
df = pd.read_sql_query(query, self.engine)
df = df.tail(10)
df = df.reset_index(drop=true)
#print(df)
arima_count = 0
for x in range((len(df) - 1)):
# Check if upward or downward trend
if df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][x] \
or (df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] < df['forecastcloseprice'][x]):
arima_count += 1
#print(df['close'], df['forecastcloseprice'])
#print(arima_count)
#print(arima_count/len(df))
# Accuracy test for the new function MSF1
def MSF1_accuracy(self):
# Queires the database to grab all of the Macro Economic Variable codes
query = "SELECT macroeconcode FROM dbo_macroeconmaster WHERE activecode = 'A'"
id = pd.read_sql_query(query, self.engine)
id = id.reset_index(drop=True)
# Queries the database to grab all of the instrument IDs
query = 'SELECT instrumentid FROM dbo_instrumentmaster'
id2 = pd.read_sql_query(query, self.engine)
id2 = id2.reset_index(drop=True)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
# Bool to determine whether we append to dbo_tempvisualize or replace the values
to_append = False
# Create a for loop to iterate through all of the instrument ids
for v in id2['instrumentid']:
# Initializes a list for which we will eventually be storing all data to add to the macroeconalgorithm database table
data = []
# Data1 will be used to store the forecastdate, instrumentid, forecastprice, and algorithm code
# It will be used to graph our backtested forecast against the actual instrument prices
data1 = []
# Getting Dates for Future Forecast as well as actual close prices for instrumentID#
# We chose 2018 - 2020, to alter this date range simply change the dates in the 3rd line of the query for the dates you want to test on
# Make sure they are valid dates as some instruments only have statistics that go back so far, check the instrument statistic table to figure out how far back each instrument goes
query = "SELECT date, close FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, start_date, end_date)
# instrument_stats will hold the closing prices and the dates for the dates we are forecasting for
instrument_stats = pd.read_sql_query(query, self.engine)
# We isolate the dates and closing prices into individual arrays to make them easier to work with
date = []
close = []
for i in instrument_stats['date']:
date.append(i)
for i in instrument_stats['close']:
close.append(i)
# n will always correspond to the amount of dates, as the amount of dates is the number of data points being compared
n = len(date)
# Median_forecast will be a dictionary where the key is the date and the value is a list of forecasted prices
median_forecast = {}
# This disctionary will be used to easily combine all of the forecasts for different dates to determine the median forecast value
for i in date:
temp = {i: []}
median_forecast.update(temp)
# This query will grab quarterly instrument prices from between 2014 and the current date to be used in the forecasting
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, train_date, start_date)
# Executes the query and stores the result in a dataframe variable
df2 = pd.read_sql_query(query, self.engine)
# This for loop iterates through the different macro economic codes to calculate the percent change for each macroeconomic variable
for x in id['macroeconcode']:
# Retrieves the most recent macro economic statistics prior to the date for which we are testing our algorithm
query = "SELECT * FROM dbo_macroeconstatistics WHERE macroeconcode = {} and date <= {} ".format('"' + str(x) + '"', start_date)
df = pd.read_sql_query(query, self.engine)
macro = df.tail(n)
SP = df2.tail(n)
temp = df.tail(n + 1)
temp = temp.reset_index()
# Converts macro variables to precent change
macroPercentChange = macro
macro = macro.reset_index(drop=True)
SP = SP.reset_index(drop=True)
macroPercentChange = macroPercentChange.reset_index(drop=True)
for i in range(0, n):
if (i == 0):
macrov = (macro['statistics'][i] - temp['statistics'][i]) / temp['statistics'][i]
macroPercentChange['statistics'].iloc[i] = macrov * 100
else:
macrov = (macro['statistics'][i] - macro['statistics'][i - 1]) / macro['statistics'][i - 1]
macroPercentChange['statistics'].iloc[i] = macrov * 100
# Algorithm for forecast price
S = calc(self, macroPercentChange, SP,n) # Calculates the average GDP and S&P values for the given data points over n days and performs operations on GDP average
# isFirst will determine whether or not this is the first calculation being done
# If it is true then we use the most recent instrument statistic to forecast the first pricepoint
# IF it is false then we use the previous forecast price to predict the next forecast price
isFirst = True
# temp_price will be used to hold the previous forecast price for the next prediction
temp_price = 0
# Setup a for loop to calculate the final forecast price and add data to the list variable data
for i in range(n):
if isFirst:
if x in [2, 3, 4]:
temp_price = ((S * (SP['close'].iloc[n-1])) + (SP['close'].iloc[n-1]))
isFirst = False
else:
temp_price = ((S * SP['close'].iloc[n-1]) + SP['close'].iloc[n-1])
isFirst = False
else:
if x in [2, 3, 4]:
temp_price = ((S * temp_price) + temp_price)
else:
temp_price = ((S * temp_price) + temp_price)
# Once the forecast price is calculated append it to median_forecast list
median_forecast[date[i]].append(temp_price)
# Calculates the median value for each date using a list of prices forecasted by each individual macro economic variable
forecast_prices = []
for i in date:
# Sort the forecasted prices based on date
sorted_prices = sorted(median_forecast[i])
# calculate the median forecasted price for each date
if len(sorted_prices) % 2 == 0:
center = int(len(sorted_prices) / 2)
forecast_prices.append(sorted_prices[center])
else:
center = int(len(sorted_prices) / 2)
forecast_prices.append((sorted_prices[center] + sorted_prices[center - 1]) / 2)
# Set up a for loop to construct a list using variables associated with macroeconalgorithm database table
for i in range(len(forecast_prices)):
data.append([date[i], v, 'ALL', forecast_prices[i], close[i], 'MSF1', 0])
data1.append([date[i], v, forecast_prices[i], 'MSF1'])
# Convert data list to dataframe variable
df = pd.DataFrame(data, columns=['forecastdate', 'instrumentid', 'macroeconcode',
'forecastcloseprice', 'close', 'algorithmcode', 'prederror'])
df1 = pd.DataFrame(data1, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'algorithmcode'])
df1.to_sql('dbo_tempvisualize', self.engine, if_exists=('replace' if not to_append else 'append'), index=False)
to_append = True
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(n):
absolute_percent_error.append(abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / n
count = 0
# Calculates trend accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
length = len(df)
trend_error = (count / length) * 100
print("Trend accuracy for %s for instrument %d is %.2f%%" % ('MSF1', v, trend_error))
print("The average percent error for %s for instrument %d is %.2f%%" % ('MSF1', v, average_percent_error * 100))
# return the average percent error calculated above
# This function is not currently used, it can be used to check the accuracy of MSF2 but will need set weightings
# The functions below this one will test the accuracy using a variety of weightings and choose the weightings with the best results
def MSF2_accuracy(self):
n = 8
#Gets the macro economic variables codes and names to loop through the inidividual macro variables
query = "SELECT macroeconcode, macroeconname FROM dbo_macroeconmaster WHERE activecode = 'A'"
data = pd.read_sql_query(query, self.engine)
macrocodes = []
indicators = {}
for i in range(len(data['macroeconcode'])):
macrocodes.append(data['macroeconcode'].loc[i])
d = {data['macroeconcode'].loc[i]: []}
indicators.update(d)
#Gets the instrument ids to loop through the individual instruments
query = 'SELECT instrumentid, instrumentname FROM dbo_instrumentmaster'
data = pd.read_sql_query(query, self.engine)
instrumentids = []
for i in data['instrumentid']:
instrumentids.append(i)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
#Loops through each instrument id to preform error calculations 1 instrument at a time
for i in instrumentids:
#Gets the instrument statistics to run through the function
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(i, train_date, start_date)
train_data = pd.read_sql_query(query, self.engine)
#Gets the instrument statistics to check against the forecast prices
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(i, start_date, end_date)
check_data = pd.read_sql_query(query, self.engine)
#Gets the dates for the future forecast prices so they match the instrument statistics
dates = []
for l in check_data['date']:
dates.append(str(l))
#Loops through the macro economic variable codes to calculate percent change
for j in macrocodes:
#Retrieves macro economic statistics for each macro variables
query = "SELECT date, statistics, macroeconcode FROM dbo_macroeconstatistics WHERE macroeconcode = {} AND date <= {}".format('"' + j + '"', start_date)
data = pd.read_sql_query(query, self.engine)
# For loop to retrieve macro statistics and calculate percent change
for k in range(n):
temp = data.tail(n + 1)
data = data.tail(n)
if j == k:
macrov = (data['statistics'].iloc[k] - temp['statistics'].iloc[0]) / temp['statistics'].iloc[0]
indicators[j].append(macrov)
else:
macrov = (data['statistics'].iloc[k] - data['statistics'].iloc[k - 1]) / data['statistics'].iloc[
k - 1]
indicators[j].append(macrov)
#Preforms the actual calculations and stores them in an array called calculated forecast
calculated_forecast = []
for k in range(n):
stat = indicators['GDP'][k] * 1 - (indicators['UR'][k] * 0 + indicators['IR'][k] * .5) - (
indicators['MI'][k] * indicators['MI'][k])
stat = (stat * train_data['close'].iloc[n-1]) + train_data['close'].iloc[n-1]
calculated_forecast.append(stat)
#Creates and inserts the forecast dates, instrument ids, calculated forecast prices, and actual close prices into an array
results = []
for k in range(n):
results.append([dates[k], i, calculated_forecast[k], check_data['close'].loc[k]])
#Creates a dataframe out of the array created above
df = pd.DataFrame(results, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'close'])
#print(df)
count = 0
# Calculates accuracy
percent_error = []
temp_error = 0
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] < df['forecastcloseprice'][x]):
count += 1
temp_error = abs((df['close'][x] - df['forecastcloseprice'][x]))/df['close']
#Percent Error calculation
temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
absolute_percent_error = [abs(ele) for ele in temp_error]
percent_error.append(absolute_percent_error)
if df['instrumentid'][i] == 1:
gm_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
gm_absolute_percent_error = [abs(ele) for ele in gm_temp_error]
#Calculate sum of percent error and find average
gm_average_percent_error = sum(gm_absolute_percent_error) / 8
#print("Average percent error of MSF2 on GM stock is: ", gm_average_percent_error * 100, "%")
if df['instrumentid'][i] == 2:
pfe_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
pfe_absolute_percent_error = [abs(ele) for ele in pfe_temp_error]
#Calculate sum of percent error and find average
pfe_average_percent_error = sum(pfe_absolute_percent_error) / 8
#print("Average percent error of MSF2 on PFE stock is: ", pfe_average_percent_error * 100, "%")
if df['instrumentid'][i] == 3:
spy_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
spy_absolute_percent_error = [abs(ele) for ele in spy_temp_error]
#Calculate sum of percent error and find average
spy_average_percent_error = sum(spy_absolute_percent_error) / 8
#print("Average percent error of MSF2 on S&P 500 stock is: ", spy_average_percent_error * 100, "%")
if df['instrumentid'][i] == 4:
xph_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
xph_absolute_percent_error = [abs(ele) for ele in xph_temp_error]
#Calculate sum of percent error and find average
xph_average_percent_error = sum(xph_absolute_percent_error) / 8
#print("Average percent error of MSF2 on XPH stock is: ", xph_average_percent_error * 100, "%")
if df['instrumentid'][i] == 5:
carz_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
carz_absolute_percent_error = [abs(ele) for ele in carz_temp_error]
#Calculate sum of percent error and find average
carz_average_percent_error = sum(carz_absolute_percent_error) / 8
#print("Average percent error of MSF2 on CARZ index stock is: ", carz_average_percent_error * 100, "%")
if df['instrumentid'][i] == 6:
tyx_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
tyx_absolute_percent_error = [abs(ele) for ele in tyx_temp_error]
#Calculate sum of percent error and find average
tyx_average_percent_error = sum(tyx_absolute_percent_error) / 8
#print("Average percent error of MSF2 on TYX 30-YR bond is: ", tyx_average_percent_error * 100, "%")
d = len(df)
b = (count / d) * 100
#Prints the trend accuracy
#print('The accuracy for instrument %d: %.2f%%\n' % (i, b))
#Create weightings MSF2 runs the MSF2 algorithm for past dates and compares them to actual instrument prices, generating a percent error calculation
#We then iterate through several different weightings and we compare each percent error for each instrument and determine the weightings with the lowest percent error
def create_weightings_MSF2(self, setWeightings):
# Query to grab the macroeconcodes and macroeconnames from the macroeconmaster database table
query = "SELECT macroeconcode, macroeconname FROM dbo_macroeconmaster WHERE activecode = 'A'"
data = pd.read_sql_query(query, self.engine)
# Query to grab the instrumentid and instrument name from the instrumentmaster database table
query = 'SELECT instrumentid, instrumentname FROM dbo_instrumentmaster'
data1 = pd.read_sql_query(query, self.engine)
# Keys is a dictionary that will be used to store the macro econ code for each macro econ name
keys = {}
for i in range(len(data)):
keys.update({data['macroeconname'].iloc[i]: data['macroeconcode'].iloc[i]})
# ikeys is a dictionary that will be used to store instrument ids for each instrument name
ikeys = {}
for x in range(len(data1)):
ikeys.update({data1['instrumentname'].iloc[x]: data1['instrumentid'].iloc[x]})
#Vars is a dictionary used to store the macro economic variable percent change for each macro economic code
vars = {}
#Vars is only populated with the relevant macro economic variables (GDP, COVI, CPIUC, and FSI)
for i in data['macroeconcode']:
if (i == 'GDP' or i == 'UR' or i == 'IR' or i == 'MI'):
d = {i: []}
vars.update(d)
#Weightings is used to store the best weightings for each instrument id which is returned to dataforecast and used for actual prediction
weightings = {}
#n represents the number of datapoints we are working with (represented in quarters)
n = 8
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
# For loop to loop through the macroeconomic codes to calculate the macro economic variable percent change
for i in keys:
# Check to make sure the macroeconcode we are working with is one of the relevant ones
if keys[i] in vars:
# Query to grab the macroeconomic statistics from the database using the relevant macro economic codes
query = "SELECT date, statistics, macroeconcode FROM dbo_macroeconstatistics WHERE macroeconcode = {} AND date <= {}".format(
'"' + keys[i] + '"', start_date)
data = pd.read_sql_query(query, self.engine)
# For loop to retrieve macro statistics and calculate percent change
for j in range(n):
# This will grab the n+1 statistic to use to calculate the percent change to the n statistic
temp = data.tail(n + 1)
# This will grab the most recent n statistics from the query, as we are working only with n points
data = data.tail(n)
# For the first iteration we need to use the n+1th statistic to calculate percent change on the oldest point
if j == 0:
macrov = (data['statistics'].iloc[j] - temp['statistics'].iloc[0]) / temp['statistics'].iloc[0]
vars[keys[i]].append(macrov)
else:
macrov = (data['statistics'].iloc[j] - data['statistics'].iloc[j - 1]) / \
data['statistics'].iloc[j - 1]
vars[keys[i]].append(macrov)
# If you are not using set weightings then this if statement will run and create the best fit weightings
if not setWeightings:
# We now iterate through the instrument ids
for x in ikeys:
# This query will grab the quarterly instrument statistics from 2016 to 2018
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(ikeys[x], train_date, start_date)
# Then we execute the query and store the returned values in instrumentStats, and grab the last n stats from the dataframe as we are only using n datapoints
instrumentStats = pd.read_sql_query(query, self.engine)
instrumentStats = instrumentStats.tail(n)
#Best weightings will be used to store the best weightings for each instrument
best_weightings = [0, 0, 0]
#Best avg error will be used to store the best average percent error for each isntrument
best_avg_error = -1
#Best trend error will be used to store the best trend error for each instrument
best_trend_error = -1
#Best forecast prices will be used to store the forecast prices for the best weightings to store them in a database for visual comparison later
best_forecast_prices = []
# We now iterate through all 3 different possible weightings
for weight in numpy.arange(-5.7, 2.8, .25):
for uweight in numpy.arange(-3.7, 3.6, .25):
for iweight in numpy.arange(-.8, .9, .25):
# We intialize a list to store the resulting forecasted prices to compare in another function
stat_check = []
# isFirst will determine whether or not this is the first calculation being done
# If it is true then we use the most recent instrument statistic to forecast the first pricepoint
# IF it is false then we use the previous forecast price to predict the next forecast price
isFirst = True
# This is the actual calculation of MSF3 where we store the result in stat_check to compare to actual instrument prices
for i in range(n):
if isFirst:
#Change to pluses and test accuracy
stat = vars['GDP'][i] * weight - vars['UR'][i] * uweight + vars['IR'][i] * iweight - (
vars['MI'][i] * vars['MI'][i])
stat = (stat * instrumentStats['close'].iloc[n-1]) + instrumentStats['close'].iloc[n-1]
stat_check.append(stat)
temp_price = stat
isFirst = False
else:
stat = vars['GDP'][i] * weight - (vars['UR'][i] * uweight + vars['IR'][i] * iweight) - (
vars['MI'][i] * vars['MI'][i])
stat = (stat * temp_price) + temp_price
stat_check.append(stat)
temp_price = stat
# We call to the weight check function using the list of forecasted prices, the current instrument id, the amount of datapoints we are working with, and the name of the function we are testing
# It then returns the average percent error and trend error for the forecasted prices, as well as the dates we are forecasting for so we can insert them into the visualize table
temp_avg_error, temp_trend_error, dates = weight_check(DBEngine().mysql_engine(), stat_check, ikeys[x], n, 'MSF2', start_date, end_date)
# Check to see if the best_avg_error has been initialized to a valid average percent error, if not then no average error or trend error has been calculated yet
if (best_avg_error < 0):
# If so store the average percent error, the best weightings, best trend error, and the resulting forecasted prices for comparison with other weightings
best_avg_error = temp_avg_error
best_weightings = [weight, uweight, iweight]
best_trend_error = temp_trend_error
best_forecast_prices = stat_check
# Otherwise check if the newly calculated average percent error is worse than the newly calculated one
elif (best_avg_error > temp_avg_error):
# And if so set the values for all the relevant variables
best_avg_error = temp_avg_error
best_weightings = [weight, uweight, iweight]
best_trend_error = temp_trend_error
best_forecast_prices = stat_check
# Print statements to view the average percent error, trend error, and best weightings
print("The lowest avg percent error is %.7f%% for instrumentID %d" % (best_avg_error * 100, ikeys[x]), ' for function: MSF2')
print("The weightings are: ", best_weightings, ' for function: MSF2')
print('The trend accuracy is: ', best_trend_error)
# initializes weightings dictionary as the best weightings found for each instrument id
weightings[ikeys[x]] = best_weightings
# visual_comparisons will be used to store the past forecasted prices so we can visualize them compared to actual instrument prices on a graph
visual_comparisons = []
for k in range(n):
visual_comparisons.append([dates[k], ikeys[x], best_forecast_prices[k], 'MSF2'])
df1 = pd.DataFrame(visual_comparisons, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'algorithmcode'])
df1.to_sql('dbo_tempvisualize', self.engine,
if_exists=('append'), index=False)
# The weightings for each instrument ID are returned to dataforecast and used for prediction
return weightings
# This else statement will make use of the preset weightings for prediction and comparison
else:
# These are the set weightings as of 4/14/2020, these may not be relevant in the future. Feel free to change them
weightings = {1: [-2.2, 3.3, 0.44999999999999996],
2: [1.0499999999999998, -3.2, -0.8],
3: [2.55, 3.3, 0.7],
4: [0.04999999999999982, 3.05, 0.7],
5: [-4.7, 3.3, 0.44999999999999996],
6: [-1.2000000000000002, -3.7, -0.8]}
# We now iterate through the instrument ids
for x in ikeys:
# This query will grab the quarterly instrument statistics from 2016 to 2018
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(ikeys[x], train_date, start_date)
# Then we execute the query and store the returned values in instrumentStats, and grab the last n stats from the dataframe as we are only using n datapoints
instrumentStats = | pd.read_sql_query(query, self.engine) | pandas.read_sql_query |
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import os
import sys
import glob
import pandas as pd
import numpy as np
import gym
from gym import spaces
import sim_analysis
import tqdm
from pprint import pprint
import config
from scipy.spatial.distance import cdist
import sim_utils
class DynamicPlume:
def __init__(self,
sim_dt=0.01,
birth_rate=1.0,
env_dt=0.04,
birthx=1.0, # per-episode puff birth rate sparsity minimum
birthx_max=1.0, # overall odor puff birth rate sparsity max
wind_speed=0.5,
wind_y_var=0.5,
qvar=0.0, # Variance of init. location; higher = more off-plume initializations
diff_max=0.8, # teacher curriculum
diff_min=0.4, # teacher curriculum
warmup=25, # warmup upto these many steps
max_steps=300, # max steps in episode (used for switch_idxs, ok to run longer)
dataset=None, # optional: imitate a "dataset"
verbose=0):
super(DynamicPlume, self).__init__()
# Init
# print(os.getcwd())
self.verbose = verbose
self.warmup = warmup
self.max_steps = max_steps
self.snapshots = self.init_snapshots(config.datadir)
self.birthx = birthx
self.steps_per_env_dt = 4 # env_dt/sim_dt hardcoded
self.birth_rate = birth_rate
self.wind_y_var = wind_y_var
self.wind_speed = wind_speed
self.wind_degree = 0
# self.switch_counts = [0]*6 + [i for i in range(1, 13)] # mix of constant & switch
self.switch_counts = [0, 0, 0, 1, 1, 1, 2, 4, 6, 8] # mix of constant & switch
if dataset is not None and 'constant' in dataset:
self.switch_counts = [0]
if dataset is not None and 'noisy' in dataset:
self.switch_counts = [0, 0, 1, 1, 1, 2, 3, 4, 5, 6] # mix of constant & switch
self.diff_min = diff_min
self.diff_max = diff_max
self.qvar = qvar
self.reset()
def init_snapshots(self, snapshots_dir):
fnames = list(glob.glob(f"{snapshots_dir}/*_snapshot.csv"))[:10]
if len(fnames) < 1:
print(len(fnames), snapshots_dir)
return [ | pd.read_csv(x) | pandas.read_csv |
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.drop_duplicates()
| tm.assert_index_equal(idx, result) | pandas.util.testing.assert_index_equal |
import pandas as pd
def dataframe_column_to_str(dataframe, col_name, inplace=False,
return_col=False):
"""Convert columun in the dataframe into string type while preserving NaN
values.
This method is useful when performing join over numeric columns. Currently,
the join methods expect the join columns to be of string type. Hence, the
numeric columns need to be converted to string type before performing the
join.
Args:
dataframe (DataFrame): Input pandas dataframe.
col_name (string): Name of the column in the dataframe to be converted.
inplace (boolean): A flag indicating whether the input dataframe should
be modified inplace or in a copy of it.
return_col (boolean): A flag indicating whether a copy of the converted
column should be returned. When this flag is set to True, the method
will not modify the original dataframe and will return a new column
of string type. Only one of inplace and return_col can be set to
True.
Returns:
A Boolean value when inplace is set to True.
A new dataframe when inplace is set to False and return_col is set to False.
A series when inplace is set to False and return_col is set to True.
"""
if not isinstance(dataframe, pd.DataFrame):
raise AssertionError('First argument is not of type pandas dataframe')
if col_name not in dataframe.columns:
raise AssertionError('Column \'' + col_name + '\' not found in the' + \
' input dataframe')
if not isinstance(inplace, bool):
raise AssertionError('Parameter \'inplace\' is not of type bool')
if not isinstance(return_col, bool):
raise AssertionError('Parameter \'return_col\' is not of type bool')
if inplace and return_col:
raise AssertionError('Both \'inplace\' and \'return_col\' parameters' +\
'cannot be set to True')
col_type = dataframe[col_name].dtype
if inplace:
num_rows = len(dataframe[col_name])
if (num_rows == 0 or sum(pd.isnull(dataframe[col_name])) == num_rows):
dataframe[col_name] = dataframe[col_name].astype(pd.np.object)
return True
else:
return series_to_str(dataframe[col_name], inplace)
elif return_col:
return series_to_str(dataframe[col_name], inplace)
else:
dataframe_copy = dataframe.copy()
series_to_str(dataframe_copy[col_name], True)
return dataframe_copy
def series_to_str(series, inplace=False):
"""Convert series into string type while preserving NaN values.
Args:
series (Series): Input pandas series.
inplace (boolean): A flag indicating whether the input series should
be modified inplace or in a copy of it. This flag is ignored when
the input series consists of only NaN values or the series is
empty (with int or float type). In these two cases, we always return
a copy irrespective of the inplace flag.
Returns:
A Boolean value when inplace is set to True.
A series when inplace is set to False.
"""
if not isinstance(series, pd.Series):
raise AssertionError('First argument is not of type pandas dataframe')
if not isinstance(inplace, bool):
raise AssertionError('Parameter \'inplace\' is not of type bool')
col_type = series.dtype
# Currently, we ignore the inplace flag when the series is empty and is of
# type int or float. In this case, we will always return a copy.
if len(series) == 0:
if col_type == pd.np.object and inplace:
return True
else:
return series.astype(pd.np.object)
if col_type == pd.np.object:
# If column is already of type object, do not perform any conversion.
if inplace:
return True
else:
return series.copy()
elif pd.np.issubdtype(col_type, pd.np.integer):
# If the column is of type int, then there are no missing values in the
# column and hence we can directly convert it to string using
# the astype method.
col_str = series.astype(str)
if inplace:
series.update(col_str)
return True
else:
return col_str
elif pd.np.issubdtype(col_type, pd.np.float):
# If the column is of type float, then there are two cases:
# (1) column only contains interger values along with NaN.
# (2) column actually contains floating point values.
# For case 1, we preserve the NaN values as such and convert the float
# values to string by first converting them to int and then to string.
# For case 1, we preserve the NaN values as such and convert the float
# values directly to string.
# get the column values that are not NaN
col_non_nan_values = series.dropna()
# Currently, we ignore the inplace flag when all values in the column
# are NaN and will always return a copy of the column cast into
# object type.
if len(col_non_nan_values) == 0:
return series.astype(pd.np.object)
# find how many of these values are actually integer values cast into
# float.
int_values = sum(col_non_nan_values.apply(lambda val: val.is_integer()))
# if all these values are interger values, then we handle according
# to case 1, else we proceed by case 2.
if int_values == len(col_non_nan_values):
col_str = series.apply(lambda val: pd.np.NaN if
| pd.isnull(val) | pandas.isnull |
import numpy as np
import pandas as pd
from nilearn import image
import json
import pytest
from neuroquery_image_search import _searching, _datasets
def test_image_search(tmp_path, fake_img):
img_path = str(tmp_path / "img.nii.gz")
fake_img.to_filename(img_path)
results_path = tmp_path / "results.json"
_searching.image_search(
f"{img_path} -o {results_path} --n_studies 7 --n_terms 3".split()
)
results = json.loads(results_path.read_text())
study_results = pd.DataFrame(results["studies"])
assert study_results.shape == (7, 4)
assert np.allclose(study_results.reset_index().at[0, "similarity"], 1.0)
results_path = tmp_path / "results.html"
_searching.image_search(
[img_path, "-o", str(results_path), "--n_studies", "1"]
)
results = results_path.read_text()
assert results.strip().startswith("<!DOCTYPE html>")
_searching.image_search(["-o", str(results_path), "--n_studies", "7"])
results = results_path.read_text()
assert "Image" in results
_searching.image_search([])
def test_json_encoder():
df = pd.DataFrame({"A": [2, 3]}, index=list("ab"))
data = {"a": {"B": 3.3}, "b": df}
as_json = json.dumps(data, cls=_searching._JSONEncoder)
loaded = json.loads(as_json)
loaded_df = | pd.DataFrame(loaded["b"]) | pandas.DataFrame |
"""
This script loads Google and Apple Mobility reports, builds cleaned reports in different formats and builds merged files from both sources.
Original data:
- Google Community Mobility reports: https://www.google.com/covid19/mobility/
- Apple Mobility Trends reports: https://www.apple.com/covid19/mobility
"""
import io
import os
import datetime
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import re
import json
import pandas as pd
def get_google_link():
'''Get link of Google Community Mobility report file
Returns:
link (str): link of Google Community report file
'''
# get webpage source
url = 'https://www.google.com/covid19/mobility/'
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
csv_tag = soup.find('a', {"class": "icon-link"})
link = csv_tag['href']
return link
def download_google_reports(directory="google_reports"):
'''Download Google Community Mobility report in CSV format
Args:
directory: directory to which CSV report will be downloaded
Returns:
new_files (bool): flag indicating whether or not new files have been downloaded
'''
new_files = False
# create directory if it don't exist
if not os.path.exists(directory):
os.makedirs(directory)
# download CSV file
link = get_google_link()
file_name = "Global_Mobility_Report.csv"
path = os.path.join(directory, file_name)
if not os.path.isfile(path):
new_files = True
urllib.request.urlretrieve(link, path)
else:
path_new = os.path.join(directory, file_name + "_new")
urllib.request.urlretrieve(link, path_new)
if os.path.getsize(path) == os.path.getsize(path_new):
os.remove(path_new)
else:
new_files = True
os.remove(path)
os.rename(path_new, path)
if not new_files:
print('Google: No updates')
else:
print('Google: Update available')
return new_files
def build_google_report(
source="Global_Mobility_Report.csv",
destination="mobility_report.csv",
report_type="regions"):
'''Build cleaned Google report for worldwide or for some country (currently only for the US)
Args:
source: location of the raw Google CSV report
destination: destination file path
report_type: two options available: "regions" - report for worldwide, "US" - report for the US
'''
df = pd.read_csv(source, low_memory=False)
df = df.drop(columns=['country_region_code'])
df = df.rename(
columns={
'country_region': 'country',
'retail_and_recreation_percent_change_from_baseline': 'retail',
'grocery_and_pharmacy_percent_change_from_baseline': 'grocery and pharmacy',
'parks_percent_change_from_baseline': 'parks',
'transit_stations_percent_change_from_baseline': 'transit stations',
'workplaces_percent_change_from_baseline': 'workplaces',
'residential_percent_change_from_baseline': 'residential'})
if report_type == "regions":
df = df[df['sub_region_2'].isnull()]
df = df.drop(columns=['sub_region_2'])
df = df.rename(columns={'sub_region_1': 'region'})
df['region'].fillna('Total', inplace=True)
elif report_type == "US":
df = df[(df['country'] == "United States")]
df = df.drop(columns=['country'])
df = df.rename(
columns={
'sub_region_1': 'state',
'sub_region_2': 'county'})
df['state'].fillna('Total', inplace=True)
df['county'].fillna('Total', inplace=True)
df.to_csv(destination, index=False)
def get_apple_link():
'''Get link of Apple Mobility Trends report file
Returns:
link (str): link of Apple Mobility Trends report file
'''
# get link via API
json_link = "https://covid19-static.cdn-apple.com/covid19-mobility-data/current/v3/index.json"
with urllib.request.urlopen(json_link) as url:
json_data = json.loads(url.read().decode())
link = "https://covid19-static.cdn-apple.com" + \
json_data['basePath'] + json_data['regions']['en-us']['csvPath']
return link
def download_apple_report(directory="apple_reports"):
'''Download Apple Mobility Trends report in CSV
Args:
directory: directory to which CSV report will be downloaded
Returns:
new_files (bool): flag indicating whether or not a new file has been downloaded
'''
new_files = False
if not os.path.exists(directory):
os.makedirs(directory)
link = get_apple_link()
file_name = "applemobilitytrends.csv"
path = os.path.join(directory, file_name)
if not os.path.isfile(path):
new_files = True
urllib.request.urlretrieve(link, path)
else:
path_new = os.path.join(directory, file_name + "_new")
urllib.request.urlretrieve(link, path_new)
if os.path.getsize(path) == os.path.getsize(path_new):
os.remove(path_new)
else:
new_files = True
os.remove(path)
os.rename(path_new, path)
if not new_files:
print('Apple: No updates')
else:
print('Apple: Update available')
return new_files
def build_apple_report(
source=os.path.join(
'apple_reports',
"applemobilitytrends.csv"),
destination=os.path.join(
'apple_reports',
"apple_mobility_report.csv"),
report_type="regions"):
'''Build cleaned Apple report (transform dates from columns to rows, add country names for subregions and cities)
for worldwide or for some country (currently only for the US)
Args:
source: location of the raw Apple CSV report
destination: destination file path
report_type: two options available: "regions" - report for worldwide, "US" - report for the US
'''
apple = pd.read_csv(source)
apple = apple.drop(columns=['alternative_name'])
apple['country'] = apple.apply(
lambda x: x['region'] if x['geo_type'] == 'country/region' else x['country'],
axis=1)
if report_type == 'regions':
apple = apple[apple.geo_type != 'county']
apple['sub-region'] = apple.apply(lambda x: 'Total' if x['geo_type'] == 'country/region' else (
x['region'] if x['geo_type'] == 'sub-region' else x['sub-region']), axis=1)
apple['subregion_and_city'] = apple.apply(
lambda x: 'Total' if x['geo_type'] == 'country/region' else x['region'], axis=1)
apple = apple.drop(columns=['region'])
apple['sub-region'] = apple['sub-region'].fillna(
apple['subregion_and_city'])
apple = apple.melt(
id_vars=[
'geo_type',
'subregion_and_city',
'sub-region',
'transportation_type',
'country'],
var_name='date')
apple['value'] = apple['value'] - 100
apple = apple.pivot_table(
index=[
"geo_type",
"subregion_and_city",
"sub-region",
"date",
"country"],
columns='transportation_type').reset_index()
apple.columns = [t + (v if v != "value" else "")
for v, t in apple.columns]
apple = apple[['country', 'sub-region', 'subregion_and_city',
'geo_type', 'date', 'driving', 'transit', 'walking']]
apple = apple.sort_values(by=['country',
'sub-region',
'subregion_and_city',
'date']).reset_index(drop=True)
elif report_type == "US":
apple = apple[apple.country == "United States"].drop(columns=[
'country'])
apple['sub-region'] = apple['sub-region'].fillna(
apple['region']).replace({"United States": "Total"})
apple['region'] = apple.apply(lambda x: x['region'] if (
x['geo_type'] == 'city' or x['geo_type'] == 'county') else 'Total', axis=1)
apple = apple.rename(
columns={
'sub-region': 'state',
'region': 'county_and_city'})
apple = apple.melt(
id_vars=[
'geo_type',
'state',
'county_and_city',
'transportation_type'],
var_name='date')
apple['value'] = apple['value'] - 100
apple = apple.pivot_table(
index=[
'geo_type',
'state',
'county_and_city',
'date'],
columns='transportation_type').reset_index()
apple.columns = [t + (v if v != "value" else "")
for v, t in apple.columns]
apple = apple[['state', 'county_and_city', 'geo_type',
'date', 'driving', 'transit', 'walking']]
apple = apple.sort_values(
by=['state', 'county_and_city', 'geo_type', 'date']).reset_index(drop=True)
apple.to_csv(destination, index=False)
def build_summary_report(
apple_source=os.path.join(
'apple_reports',
"applemobilitytrends.csv"),
google_source=os.path.join(
"google_reports",
"Global_Mobility_Report.csv"),
destination=os.path.join(
"summary_reports",
"summary_report.csv")):
'''Build a merged report from Google and Apple data
Args:
apple_source: location of the raw Apple CSV report
google_source: location of the raw Google CSV report
destination: destination file path
'''
# preprocess apple data
apple = pd.read_csv(apple_source)
apple['country'] = apple.apply(
lambda x: x['region'] if x['geo_type'] == 'country/region' else x['country'],
axis=1)
apple['sub_region_1'] = apple.apply(
lambda x: 'Total' if x['geo_type'] == 'country/region' else (
x['region'] if x['geo_type'] == 'city' or x['geo_type'] == 'sub-region' else (
x['sub-region'] if x['geo_type'] == 'county' else None)), axis=1)
apple['sub_region_2'] = apple.apply(
lambda x: x['region'] if x['geo_type'] == 'county' else 'Total', axis=1)
apple = apple.drop(
columns=[
'alternative_name',
'geo_type',
'region',
'sub-region'])
apple = apple.melt(
id_vars=[
'country',
'sub_region_1',
'sub_region_2',
'transportation_type'],
var_name='date')
apple['value'] = apple['value'] - 100
apple = apple.pivot_table(
index=[
'country',
'sub_region_1',
'sub_region_2',
'date'],
columns='transportation_type').reset_index()
apple.columns = [t + (v if v != "value" else "")for v, t in apple.columns]
# convert Apple countries and subregions to Google names
country_AtoG_file = os.path.join(
'auxiliary_data', 'country_Apple_to_Google.csv')
subregions_AtoG_file = os.path.join(
'auxiliary_data', 'subregions_Apple_to_Google.csv')
if os.path.isfile(country_AtoG_file):
country_AtoG = pd.read_csv(country_AtoG_file, index_col=0)
else:
country_AtoG = None
if os.path.isfile(subregions_AtoG_file):
subregions_AtoG = pd.read_csv(subregions_AtoG_file, index_col=0)
else:
subregions_AtoG = None
apple['country'] = apple.apply(lambda x: country_AtoG.loc[x['country'], 'country_google'] if (
country_AtoG is not None and x['country'] in country_AtoG.index) else x['country'], axis=1)
apple['sub_region_1'] = apple.apply(lambda x: subregions_AtoG.loc[x['sub_region_1'], 'subregion_Google'] if (
subregions_AtoG is not None and x['sub_region_1'] in subregions_AtoG.index) else x['sub_region_1'], axis=1)
# process google data
google = | pd.read_csv(google_source, low_memory=False) | pandas.read_csv |
import numpy as np
import sys
import pandas as pd
import os
import datetime
from fnmatch import fnmatch
from splitDate import splitDate
from ObligorReminder import ObligorReminder
def UpdatePeopleState(ifEmail):
PeopleExpenditure()
PeopleAccount()
if ifEmail == 'yes':
key = raw_input("Shall people with negative balance be informed via email [y/N]? ")
if key == 'y' or key == 'Y':
ObligorReminder()
def PeopleExpenditure():
df = pd.read_csv('PeopleList')
# os.system("rm *~")
# os.system("clear")
names= df['Name'].values
Dates = FindcsvMarks()
tmp = np.empty((len(names),len(Dates)))
tmp[:] = 0
df = pd.DataFrame(tmp,index=names,columns=Dates)
for d in Dates:
df_tmp = pd.read_csv('Marks_'+ d)
df_tmp = df_tmp.loc[0:len(df_tmp)-2]
name_tmp = df_tmp['Name'].values
df_tmp = df_tmp['Total (euros)'].values
df.loc[name_tmp,d] = df_tmp
df['Name'] = names
df = df[['Name']+Dates]
df.to_csv('PeopleExpenditure',header=True,index=False)
if len(Dates) == 0:
CreatePeopleExpenditure()
def PeopleAccount():
df = pd.read_csv('PeopleList')
num_members = len(df)
if num_members >0:
if os.path.isfile('PeoplePayment'):
pass
else:
CreatePeoplePayment()
df = pd.read_csv('PeoplePayment')
paid = df['Sum']
df = pd.read_csv('PeopleExpenditure')
# nam = df['Name']
Dates = FindcsvMarks()
Header = np.hstack(('Name','Paid', Dates,'Balance'))
tmp = np.empty((len(df),1))
tmp[:] = 0
df['Paid'] = paid
df['Balance'] = tmp
df = df[Header]
df['Balance'] = df['Paid']-df[Dates].sum(axis=1)
df.to_csv('PeopleBalance',header=True,index=False)
tmp_balance = df['Balance']
df = pd.read_csv('PeopleList')
df['Balance'] = tmp_balance
df.to_csv('PeopleList',header=True,index=False)
def FindcsvMarks():
pwd = os.getcwd()
Files = filter(lambda x: fnmatch(x,'Marks_*'),os.listdir(pwd))
for idx in range(len(Files)):
f = Files[idx]
Files[idx] = f.split('_',1).pop()
pass
Files.sort(key=splitDate,reverse=True)
Dates = Files
return Dates
def CreatePeopleExpenditure():
df = pd.read_csv('PeopleList')
names= df['Name'].values
Dates = ['00.00.0000']
tmp = np.empty((len(names),len(Dates)))
tmp[:] = 0
df = pd.DataFrame(tmp,index=names,columns=Dates)
df['Name'] = names
df = df[['Name']+Dates]
df.to_csv('PeopleExpenditure',header=True,index=False)
def CreatePeoplePayment():
df = | pd.read_csv('PeopleList') | pandas.read_csv |
import pandas as pd
import numpy as np
import xml.etree.ElementTree as ET
from math import radians, cos, sin, asin, sqrt
def parse_gpx(filename):
"""Parse data from a GPX file and return a Pandas Dataframe"""
tree = ET.parse(filename)
root = tree.getroot()
# define a namespace dictionary to make element names simpler
# this mirrors the namespace definintions in the XML files
ns = {'gpx':'http://www.topografix.com/GPX/1/1',
'gpxtpx': 'http://www.garmin.com/xmlschemas/TrackPointExtension/v1'}
# when we look for elements, we need to use the namespace prefix
trk = root.find('gpx:trk', ns)
trkseg = trk.find('gpx:trkseg', ns)
data = []
times = []
# iterate over the first ten trkpt elements - the children of trkseg
for trkpt in trkseg:
# get some properties from the attributes
lon = trkpt.attrib['lon']
lat = trkpt.attrib['lat']
# get values from the child elements
ele = trkpt.find('gpx:ele', ns).text
time = trkpt.find('gpx:time', ns).text
# now dive into the extensions
ext = trkpt.find('gpx:extensions', ns)
if ext.find('gpx:power', ns) != None:
power = ext.find('gpx:power', ns).text
else:
power = 0.0
tpext = ext.find('gpxtpx:TrackPointExtension', ns)
if tpext.find('gpxtpx:atemp', ns) != None:
temp = tpext.find('gpxtpx:atemp', ns).text
else:
temp = 0.0
if tpext.find('gpxtpx:cad', ns) != None:
cadence = tpext.find('gpxtpx:cad', ns).text
else:
cadence = 0.0
hr = tpext.find('gpxtpx:hr', ns).text
row = {
'latitude': float(lat),
'longitude': float(lon),
'elevation': float(ele),
'temperature': float(temp),
'power': float(power),
'cadence': float(cadence),
'hr': float(hr),
}
data.append(row)
times.append(time)
times = pd.to_datetime(times)
df = | pd.DataFrame(data, index=times) | pandas.DataFrame |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = | Series([1.0, 2.0, np.nan, 3.0]) | pandas.Series |
from datasets import load_dataset
import streamlit as st
import pandas as pd
from googletrans import Translator
import session_state
import time
from fuzzywuzzy import fuzz,process
# Security
#passlib,hashlib,bcrypt,scrypt
import hashlib
# DB Management
import sqlite3
import os
import psycopg2
# import torch
# from transformers import PegasusForConditionalGeneration, PegasusTokenizer
state = session_state.get(question_number=0)
translator = Translator()
# model_name = 'tuner007/pegasus_paraphrase'
# torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
# tokenizer = PegasusTokenizer.from_pretrained(model_name)
# model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
# def get_response(input_text,num_return_sequences,num_beams):
# batch = tokenizer([input_text],truncation=True,padding='longest',max_length=60, return_tensors="pt").to(torch_device)
# translated = model.generate(**batch,max_length=60,num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
# tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
# return tgt_text
@st.cache(suppress_st_warning=True)
def get_qa_pair_low(file, rand):
df = pd.read_csv(file, sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
st.text(df)
return {
"text": a["text"][0],
"question": a["question"][0],
"answer": a["answer\r"][0]
}
@st.cache(suppress_st_warning=True)
def get_qa_pair_mid(file, rand):
df = pd.read_csv(file,sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
return {
"text": a["text"][0],
"question": a["question"][0],
"answer": a["answer\r"][0]
}
@st.cache(suppress_st_warning=True)
def get_qa_pair_high(file, rand):
df = pd.read_csv(file,sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
return {
"text": a["text"][0],
"question": a["question"][0],
"answer": a["answer\r"][0]
}
@st.cache(suppress_st_warning=True)
def getmcq(rand):
df = | pd.read_csv("mcq.tsv",sep="\t", lineterminator='\n') | pandas.read_csv |
import click
import pandas as pd
from Bio.SeqIO import parse, write
from random import randint, choice
TENMIL = 10 * 1000 * 1000
REGION_SIZES = [1000, 2 * 1000, 4 * 1000, 8 * 1000, 16 * 1000, 32 * 1000]
def insert_repetitive_regions(seq_rec, window_size=TENMIL, region_sizes=REGION_SIZES):
"""Insert repetitive regions into seq_rec in place. Return their coordinates."""
nwindows, regions = len(seq_rec.seq) // window_size, {}
if not nwindows:
return
for i in range(nwindows):
window_start, window_end = window_size * i, window_size * (i + 1)
region_start = randint(window_start + window_size / 4, window_end - window_size / 4)
before_region, after_region = seq_rec.seq[:region_start], seq_rec.seq[region_start:]
region_size = choice(region_sizes)
seq_rec.seq = before_region + 'A' * region_size + after_region
regions[(seq_rec.id, i)] = {
'region_start': region_start,
'region_size': region_size,
'motif': 'A',
}
return | pd.DataFrame.from_dict(regions, orient='index') | pandas.DataFrame.from_dict |
import sys
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import create_engine
# import sqlite3
def load_data(messages_filepath, categories_filepath):
'''
Function to load data and merge them into one file
Args:
messages_filepath: Filepath to load the messages.csv
categories_filepath: Filepath to load the categories.csv
Output:
df: combined dataFrame
'''
messages = | pd.read_csv(messages_filepath) | pandas.read_csv |
import datetime
from datetime import datetime
from functools import reduce
from pkg_resources import normalize_path
import streamlit as st
import pandas as pd
import altair as alt
import plotly.express as px
import plotly.graph_objects as go
import pydeck as pdk
import os
import matplotlib.pyplot as plt
import numpy as np
#Poblaciones en 2021 (actualizado el 25-Abril-2021)
habitantes = {
'Andorra': 0.077,
'Argentina': 45.20,
'Australia': 25.50,
'Austria': 9.06,
'Bangladesh': 164.69,
'Belgium': 11.59,
'Brazil': 212.56,
'Canada': 37.74,
'China': 1439.33,
'Colombia': 50.88,
'Denmark': 5.79,
'Egypt': 102.33,
'Ethiopia': 114.96,
'France': 65.27,
'Germany': 83.78,
'Greece': 10.42,
'India': 1380.00,
'Indonesia': 273.52,
'Italy': 60.46,
'Japan': 126.47,
'Netherlands': 17.13,
'Norway': 5.41,
'Poland': 37.84,
'Romania': 19.23,
'Russia': 145.93,
'Spain': 46.75,
'Sweden': 10.09,
'Switzerland': 8.65,
'United Kingdom': 67.89}
@st.cache(ttl=60*60*1)
def read_data():
BASEURL = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series"
url_confirmed = f"{BASEURL}/time_series_covid19_confirmed_global.csv"
url_deaths = f"{BASEURL}/time_series_covid19_deaths_global.csv"
url_recovered = f"{BASEURL}/time_series_covid19_recovered_global.csv"
confirmed = pd.read_csv(url_confirmed, index_col=0)
deaths = | pd.read_csv(url_deaths, index_col=0) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Дружественные числа. Исследование
# ### #Занимательная Математика
#
# #### Весь код на Github, ссылка в конце статьи!
# Импорт библиотек
# In[1]:
from IPython.display import Image
from IPython.core.display import HTML
from IPython.core.interactiveshell import InteractiveShell
from scipy.ndimage.filters import gaussian_filter1d
from scipy.signal import savgol_filter
import numpy as np
import sympy as sp
import pandas as pd
import random as r
import time
import matplotlib.pyplot as plt
import ipyturtle as turtle
InteractiveShell.ast_node_interactivity = "all"
def drawPlot(ss,title="Скорости",y="Секунд",x="Номер итерации"):
fig,ax=plt.subplots(figsize=(6,6))
ax.set_facecolor("#F2F2F2")
ax.grid()
ax.set_title(title)
ax.set_ylabel(y)
ax.set_xlabel(x)
ax.plot(ss)
# In[45]:
Image(url="https://sun9-46.userapi.com/c858036/v858036072/1e3bba/s6JwXFoOgLM.jpg", width=400)
# Хммм. Что же такое дружественные числа?
#
# Посмотрим на примере:
# Есть число 220. И есть число 284. Так вот. Они - друзья.
#
# Почему? Как определить дружественные числа?
# Дружественные числа - это такая пара, у которой сумма собственных делителей каждого числа равна другому числу.
#
# Посмотрим на практике. Создадим функцию поиска делителей, а потом просуммируем.
# In[3]:
def Divisors(num):
from math import sqrt as mmsq
s=set([1])
i=1
a=int(mmsq(num)+1)
while i<=a:
if(num//i==num):
i+=1
continue
if (num%i==0):
if (num//i!=i):
s.add(num//i)
s.add(i)
i+=1
return s
# In[4]:
Divisors(220)," #Делители числа 220"
Divisors(284)," #Делители числа 284"
# 1+2+4 = 6 Верно? Продолжим прибавлять..
# 1+2+4+5+10+11+20+22+44+55+110
#
# Ну, или зачем вручную. Для подсчёта суммы любого множества(списка) есть готовая функция **sum()**
# In[5]:
sum(Divisors(220))
sum(Divisors(284))
# И что мы наблюдаем? Сумма собственных делителей у 220 равна 284, а у числа 284 сумма - 220
# Поэтому эти числа и являются парой друзей :)
# In[6]:
Image(url="https://wikimedia.org/api/rest_v1/media/math/render/svg/f5f9a3fcecf20f39301b0f651492c3fbe712262d")
# In[7]:
Image(url="https://wikimedia.org/api/rest_v1/media/math/render/svg/4c5a454d5e4adb6fb2e7e3e467e0804e28d01824")
# А ещё кстати говоря, эта пара является первой из всего списка дружественных чисел!
# Кстати интересный факт, но хоть нам и известно уже огромное количество таких пар, мы не знаем есть ли такая пара, у которой числа были разной чётности. Вот например 220 и 284 чётные...Есть числа, где первое и второе нечётные, но чтобы один чётный, другой нечётный, такого мы не знаем ещё.
# А вот возникает наверное вопрос. Как находить эти пары?
# Конкретной формулы пока ещё нет для этих чисел, поэтому я создал итерационную фунцию.
#
# Для этого я создал функцию **AmicableNumber()**, которая на вход принимает какое-либо число. В самой функции алгоритм находит друга, если он есть на ближайшей дистанции по числовой оси.
# In[8]:
def AmicableNumber(k,returni=False):
allDels = dict()
from itertools import chain
concatenated = chain( range(k, int(k*1.6)+1 ),range(k, int(k/1.6)+1 ,-1) )
for i in concatenated:
if(str(i) not in allDels):
allDels[str(i)] = Divisors(i)
if(i != k and sum(allDels[str(i)]) == k and sum(allDels[str(k)]) == i):
if(returni):
return (k,i)
else:
print(k,"->",i)
# In[9]:
AmicableNumber(1) #Например тут ответа нет
AmicableNumber(219) #Например тут ответа нет
AmicableNumber(220) #А тут уже есть
# Если не понятно, что я имел в виду под "дистанциями", то объясню. Для начала нам нужно найти хотя-бы 5 пар дружественных чисел.
# In[10]:
AmicableNumbers=[]
i=0
while len(AmicableNumbers)!=10:
i+=1
amn=AmicableNumber(i,True)
if(amn is not None):
AmicableNumbers.append(amn)
AmicableNumbers=[{tuple(sorted(amn)) for amn in AmicableNumbers}]
print(AmicableNumbers)
# Решил найти только первые 5 пар, потому что таким алгоритмом всё это вычисляется примерно 10 минут.
# А как он работает?
#
# Мы просто перебираем относительно заданного числа x в интервале x/1.6 -> 1.6x
# Это значительно ускоряет программу, но всё ещё недостаточно.
# Вопрос, почему именно такие цифры?
#
# Показываю. Возьмём первые 10 пар дружественных чисел, а потом составим вектор из отношений чисел в парах.
#
# In[11]:
df= | pd.DataFrame([(220,284),(1184,1210),(2620,2924),(5020,5564),(6232,6368),(10744,10856),(12285,14595),(17296,18416),(63020,76084),(66928,66992)]) | pandas.DataFrame |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
import logging, os
import h5py
import glob
import numpy as np
import io3d.datareader as DR
import io3d.datawriter as DW
import argparse
import pandas as pd
import imtools.misc as misc
logger = logging.getLogger(__name__)
def sliver_preparation(datadirpath, output_datadirpath="output_data", res=100, ax=0, organ='liver'):
csvpath = output_datadirpath + '/sliver_label_'+str(res)+'_'+str(ax)+'_'+organ+'.csv'
stat = output_datadirpath + '/sliver_stat'+str(res)+'_'+str(ax)+'_'+organ+'.csv'
# datadirpath = '/home/trineon/projects/metalisa/data/SLIVER'
f = h5py.File(output_datadirpath + '/sliver_' + str(res) + '_' + str(ax) + '_' + organ +'.hdf5', 'a')
num = 1
for image in glob.glob(datadirpath + '/*orig*.mhd'):
group = f.create_group(image.split('/')[-1])
orig, _ = DR.read(image)
if ax != 0:
orig = np.rollaxis(orig, ax)
i = orig.shape[0]
orig = misc.resize_to_shape(orig, [i, res, res])
DW.write(orig, output_datadirpath + '/sliver_' +str(num)+ '_' + str(res)+'_' + str(ax)+ '.pklz', metadata={"voxelsize_mm": [1, 1, 1]})
filename = output_datadirpath + '/sliver' +str(num) +'_' + str(res)+ '_' + str(ax)+ '.pklz'
num += 1
seg = image.replace('orig','seg')
lab, _ = DR.read(seg)
if ax != 0:
lab = np.rollaxis(lab, ax)
l = list()
a = 1
for slice in lab:
if len(np.unique(slice)) > 1:
l.append(2)
a = 2
else:
if a == 2:
l.append(3)
else:
l.append(1)
del lab
for ind, slice in enumerate(orig):
name = str(ind)
dset = group.create_dataset(name, data=slice)
dset.attrs['teacher'] = l[ind]
dset.attrs['origin file'] = filename
if l[-1] == 2:
x = len(l)
else:
x = l.index(3)
if l[0] == 2:
y = 0
else:
y = l.index(2)
dt = {'filename': [filename, filename, filename], 'label': ['under ' + organ, organ, 'above ' + organ],
'start_slice_number': [0, y, x],
'stop_slice_number': [y - 1, x - 1, len(l)-1], 'axis': ax}
if dt['stop_slice_number'][0] == -1:
dt['stop_slice_number'][0] = 0
if os.path.exists(csvpath):
new_df = pd.read_csv(csvpath)
df = pd.DataFrame.from_dict(dt)
new_df = pd.concat([new_df, df], ignore_index=True)
else:
df0 = pd.DataFrame.from_dict(dt)
new_df = df0
new_df.to_csv(csvpath, index=False)
a = y
b = x-y
c = len(l)-x
dt = {'filename': [filename], 'under liver': [a] , 'liver': [b], 'above liver': [c], 'slices':[len(l)]}
if os.path.exists(stat):
new_df = pd.read_csv(stat)
df = pd.DataFrame.from_dict(dt)
new_df = pd.concat([new_df, df], ignore_index=True)
else:
df0 = pd.DataFrame.from_dict(dt)
new_df = df0
new_df.to_csv(stat, index=False)
pass
def ircad_group(datadirpath, organ='liver'):
# datadirpath = '/home/trineon/projects/metalisa/data/IRCAD'
for folder in glob.glob(datadirpath + '/labels/*/'+organ+'/'):
name = folder.split('/')[-3]
if (folder + 'IRCAD_' + str(name) + '_' + organ +'.pklz') in glob.glob(folder+'*'):
continue
else:
# concatenate CT slicis to one 3D ndarray [number_of slices, res(1), res(2)]
scan = [None]* len(glob.glob(folder + '*'))
for image in glob.glob(folder + '*'):
label, _ = DR.read(image)
scan[int(image.split('/')[-1].split('_')[-1])] = label
scan = np.array(scan).astype(np.int32)
scan = scan.squeeze()
DW.write(scan, folder + 'IRCAD_' + str(name) + '_' + organ + '.pklz',
metadata={"voxelsize_mm": [1, 1, 1]})
pass
def ircad_preparation(datadirpath, output_datadirpath="output_data", organ="liver",res=100, ax=0):
#test
stat = output_datadirpath+'/stat_ircad'+str(res)+'_'+str(ax)+'_'+organ+'.csv'
csvpath = output_datadirpath+'/label_ircad_'+str(res)+'_'+str(ax)+'_'+organ+'.csv'
# datadirpath = '/home/trineon/projects/metalisa/data/IRCAD'
seznam = [None] * 20
for folder in glob.glob(datadirpath+'/Pacient/*/'):
count = len(glob.glob(folder+'*'))
l = [None] * count
for image in glob.glob(folder+'*'):
number = int(image.split('/')[-1].split('_')[-1])-1
l[number], _ = DR.read(image)
if ax != 0:
l[number] = np.rollaxis(l[number], ax)
for ind, i in enumerate(l):
l[ind] = misc.resize_to_shape(i, [1, res, res])
scan = np.array(l)
if ax != 0:
np.rollaxis(scan, ax)
name = folder.split('/')[-2]
scan = scan.squeeze()
DW.write(scan, output_datadirpath + '/IRCAD_' +str(name) +'_' + str(res)+'_' + str(ax)+'.pklz', metadata={"voxelsize_mm": [1, 1, 1]})
seznam[int(name)-1] = output_datadirpath + '/IRCAD_'+str(name) +'_' + str(res)+'_' + str(ax)+'.pklz'
ll = [None] * 20
for folder in glob.glob(datadirpath + '/labels/*/'+organ+'/'):
count = len(glob.glob(folder+'*'))
sez = list()
for image in glob.glob(folder+'IRCAD*.pklz'):
label, _ = DR.read(image)
if ax != 0:
label = np.rollaxis(label, ax)
l = list()
a = 1
for slice in label:
if len(np.unique(slice)) > 1:
l.append(2)
a = 2
else:
if a == 2:
l.append(3)
else:
l.append(1)
ll[int(folder.split('/')[-3])-1] = l
file = seznam[int(folder.split('/')[-3])-1]
if l[-1] == 2:
x = len(l)
else:
x = l.index(3)
if l[0] == 2:
y = 0
else:
y = l.index(2)
dt = {'filename': [file, file, file], 'label': ['under ' + organ, organ, 'above ' + organ],
'start_slice_number': [0, y, x],
'stop_slice_number': [y - 1, x - 1, len(l) - 1], 'axis': ax}
if dt['stop_slice_number'][0] == -1:
dt['stop_slice_number'][0] = 0
if os.path.exists(csvpath):
new_df = pd.read_csv(csvpath)
df = pd.DataFrame.from_dict(dt)
new_df = pd.concat([new_df, df], ignore_index=True)
else:
df0 = pd.DataFrame.from_dict(dt)
new_df = df0
new_df.to_csv(csvpath, index=False)
a = y
b = x - y
c = len(l) - x
dt = {'filename': [file], 'under liver': [a], 'liver': [b], 'above liver': [c], 'slices': [len(l)]}
if os.path.exists(stat):
new_df = pd.read_csv(stat)
df = | pd.DataFrame.from_dict(dt) | pandas.DataFrame.from_dict |
import itertools
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
notna,
)
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [
Series(dtype=np.float64, name="a"),
Series([np.nan] * 5),
Series([1.0] * 5),
Series(range(5, 0, -1)),
Series(range(5)),
Series([np.nan, 1.0, np.nan, 1.0, 1.0]),
Series([np.nan, 1.0, np.nan, 2.0, 3.0]),
Series([np.nan, 1.0, np.nan, 3.0, 2.0]),
]
def create_dataframes():
return [
DataFrame(columns=["a", "a"]),
DataFrame(np.arange(15).reshape((5, 3)), columns=["a", "a", 99]),
] + [DataFrame(s) for s in create_series()]
def is_constant(x):
values = x.values.ravel("K")
return len(set(values[ | notna(values) | pandas.notna |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
#from sqlalchemy import create_engine
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[1]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
# n, pint, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
y = spq['Product_Qty']
num_bins = 5
# n, dint, patches = plt.hist(y, num_bins, facecolor='blue', alpha=0.5)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + 5000000*cap_slack[cust] for cust in Demand)
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weakly': ##weakly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((As-bs)/As)*100))
return list1,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(len(data[data.columns.tolist()[i]]),predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((As-bs)/As)*100))
return list1,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(len(data[data.columns.tolist()[i]]),predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((As-bs)/As)*100))
return list1,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(len(data[data.columns.tolist()[i]]),predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('mdashboard.html',mon=value,qulist=mqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#quarterly
@app.route('/qoutgraph', methods = ['GET','POST'])
def qoutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputq` GROUP BY `Model`")
sfile=cur.fetchall()
global qst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
qst=qlst.values
con.close()
return render_template('qdashboard.html',qulist=qst)
@app.route('/qoutgraph1', methods = ['GET', 'POST'])
def qoutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputq` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('qdashboard.html',mon=value,qulist=qst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
@app.route("/yearlysimulation",methods = ['GET','POST'])
def yearlysimulation():
if request.method == 'POST':
gdp=0
pi=0
ms=0
adv=0
gdp_dis=request.form.get('gdp_dis')
pi_dis=request.form.get('pi_dis')
ms_dis=request.form.get('ms_dis')
adv_dis=request.form.get('adv_dis')
min=request.form.get('min')
max=request.form.get('max')
mue=request.form.get('mue')
sig=request.form.get('sig')
cval=request.form.get('cval')
min1=request.form.get('min1')
max1=request.form.get('max1')
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
cval1=request.form.get('cval1')
min2=request.form.get('min2')
max2=request.form.get('max2')
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
cval2=request.form.get('cval2')
min3=request.form.get('min3')
max3=request.form.get('max3')
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
cval3=request.form.get('cval3')
itr= int(request.form.get('itr'))
frm = request.form.get('from')
sfrm=int(frm[:4])
to = request.form.get('to')
sto=int(to[:4])
kwargs={}
atrtable=[]
if request.form.get('gdp'):
gdp=1
atrtable.append('Gdp')
if gdp_dis == 'gdp_dis1':
min=request.form.get('min')
max=request.form.get('max')
kwargs['Gdp_dis']='Uniform'
kwargs['gdpvalues']=[min,max]
if gdp_dis == 'gdp_dis2':
mue=request.form.get('mue')
sig=request.form.get('sig')
kwargs['Gdp_dis']='Normal'
kwargs['gdpvalues']=[mue,sig]
if gdp_dis == 'gdp_dis3':
kwargs['Gdp_dis']='Random'
pass
if gdp_dis == 'gdp_dis4':
cval=request.form.get('cval')
kwargs['Gdp_dis']='Constant'
kwargs['gdpvalues']=[cval]
if request.form.get('pi'):
pi=1
atrtable.append('Pi')
if pi_dis == 'pi_dis1':
min1=request.form.get('min1')
max1=request.form.get('max1')
kwargs['Pi_dis']='Uniform'
kwargs['pivalues']=[min1,max1]
if pi_dis == 'pi_dis2':
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
kwargs['Pi_dis']='Normal'
kwargs['pivalues']=[mue1,sig1]
if pi_dis == 'pi_dis3':
kwargs['Pi_dis']='Random'
pass
if pi_dis == 'pi_dis4':
cval1=request.form.get('cval1')
kwargs['Pi_dis']='Constant'
kwargs['pivalues']=[cval1]
if request.form.get('ms'):
ms=1
atrtable.append('Ms')
if ms_dis == 'ms_dis1':
min=request.form.get('min2')
max=request.form.get('max2')
kwargs['Ms_dis']='Uniform'
kwargs['msvalues']=[min2,max2]
if ms_dis == 'ms_dis2':
mue=request.form.get('mue2')
sig=request.form.get('sig2')
kwargs['Ms_dis']='Normal'
kwargs['msvalues']=[mue2,sig2]
if ms_dis == 'ms_dis3':
kwargs['Ms_dis']='Random'
pass
if ms_dis == 'ms_dis4':
cval=request.form.get('cval2')
kwargs['Ms_dis']='Constant'
kwargs['msvalues']=[cval2]
if request.form.get('adv'):
adv=1
atrtable.append('Adv')
if adv_dis == 'adv_dis1':
min=request.form.get('min3')
max=request.form.get('max3')
kwargs['Adv_dis']='Uniform'
kwargs['advvalues']=[min3,max3]
if adv_dis == 'adv_dis2':
mue=request.form.get('mue3')
sig=request.form.get('sig3')
kwargs['Adv_dis']='Normal'
kwargs['advvalues']=[mue3,sig3]
if adv_dis == 'adv_dis3':
kwargs['Adv_dis']='Random'
pass
if adv_dis == 'adv_dis4':
cval=request.form.get('cval3')
kwargs['Adv_dis']='Constant'
kwargs['advvalues']=[cval3]
#print(kwargs)
#print(atrtable)
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `stech` (`gdp` VARCHAR(1),`pi` VARCHAR(1), `ms` VARCHAR(1),`adv` VARCHAR(1),`itr` VARCHAR(5),`sfrm` VARCHAR(10),`sto` VARCHAR(10))")
cur.execute("DELETE FROM `stech`")
con.commit()
cur.execute("INSERT INTO `stech` VALUES('"+str(gdp)+"','"+str(pi)+"','"+str(ms)+"','"+str(adv)+"','"+str(itr)+"','"+str(sfrm)+"','"+str(sto)+"')")
con.commit()
data = pd.DataFrame(Yeardata)
#print(data)
data.columns
xvar=pd.concat([data['GDP'],data['Pi_Exports'],data['Market_Share'],data['Advertisement_Expense']],axis=1)
yvar=pd.DataFrame(data['TotalDemand'])
regr = linear_model.LinearRegression()
regr.fit(xvar,yvar)
# predict=regr.predict(xvar)
#Error Measures
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
def sim(iteration,data,startyear,endyear,atrtable,Gdp_dis=None,gdpvalues=None,Adv_dis=None,advvalues=None,Ms_dis=None,msvalues=None,Pi_dis=None,pivalues=None):
preddata=pd.DataFrame()
simdata=pd.DataFrame()
#Errordf=pd.DataFrame()
Errormsr=pd.DataFrame()
date=pd.date_range(start=pd.datetime(startyear, 1, 1), end=pd.datetime(endyear+1, 1, 1),freq='A')
date=pd.DataFrame(date.strftime("%Y"))
#Fetching The Orignal Data Of Available Years of the Orignal Data That We Have Actually
m=len(date)
Arrayofdates=data['Date']
vari=[]
for var in Arrayofdates:
vari.append(var[:4])
Arrayofdates=pd.DataFrame(vari)
dates=[]
Fetchdata=[]
for i in range(0,m):
years=date.loc[i]
for j in range(0,len(Arrayofdates)):
if int(Arrayofdates.loc[j])==int(years):
da=data['TotalDemand'].loc[j]
Fetchdata.append(da) #Gives Data In the Given Range That we have actually
dates.extend(years) #Gives Years that we have data
for i in range(0,iteration):
df=pd.DataFrame()
#for The Gdp
S='flag'
for row in atrtable:
if row=='Gdp':
S='Gdp'
if S=='Gdp':
for row in Gdp_dis:
if row=='Normal':
gdpdf=pd.DataFrame(np.random.normal(gdpvalues[0],gdpvalues[1],m))
elif row=='Uniform':
gdpdf=pd.DataFrame(np.random.normal(gdpvalues[0],gdpvalues[1],m))
elif row=='Constant':
gdpdf=pd.DataFrame(np.random.choice([gdpvalues[0]],m))
else:
gdpdf=pd.DataFrame(np.random.uniform(-4,4,m))
else:
gdpdf=pd.DataFrame(np.random.uniform(0,0,m))
# for the pi dataframe
O='flag'
for row in atrtable:
if row=='Pi':
O='Pi'
if O=='Pi':
for row in Pi_dis:
if row=='Normal':
pidf=pd.DataFrame(np.random.normal(pivalues[0],pivalues[1],m))
elif row=='Uniform':
pidf=pd.DataFrame(np.random.normal(pivalues[0],pivalues[1],m))
elif row=='Constant':
pidf=pd.DataFrame(np.random.choice([pivalues[0]],m))
else:
pidf=pd.DataFrame(np.random.random_integers(80,120,m))
else:
pidf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Adv Dataframe
N='flag'
for row in atrtable:
if row=='Adv':
N='Adv'
if N=='Adv':
for row in Adv_dis:
if row=='Normal':
advdf=pd.DataFrame(np.random.normal(advvalues[0],advvalues[1],m))
elif row=='Uniform':
advdf=pd.DataFrame(np.random.normal(advvalues[0],advvalues[1],m))
elif row=='Constant':
advdf=pd.DataFrame(np.random.choice([advvalues[0]],m))
else:
advdf=pd.DataFrame(np.random.random_integers(500000,1000000,m))
else:
advdf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Ms dataframe
U='flag'
for row in atrtable:
if row=='Ms':
U='Ms'
if U=='Ms':
for row in Ms_dis:
if row=='Normal':
msdf=pd.DataFrame(np.random.normal(msvalues[0],msvalues[1],m))
elif row=='Uniform':
msdf=pd.DataFrame(np.random.normal(msvalues[0],msvalues[1],m))
elif row=='Constant':
msdf=pd.DataFrame(np.random.choice([msvalues[0]],m))
else:
msdf=pd.DataFrame(np.random.uniform(0.1,0.5,m))
else:
msdf=pd.DataFrame(np.random.uniform(0,0,m))
#Concatenating All the dataframes for Simulation Data
df=pd.concat([gdpdf,pidf,msdf,advdf],axis=1)
simid=pd.DataFrame(np.random.choice([i+1],m))
dd=pd.concat([simid,gdpdf,pidf,advdf,msdf],axis=1)
dd.columns=['Year','Gdp','Pi','Adv','Ms']
simdata=pd.concat([simdata,dd],axis=0)
#Predicting the Data And store in pred data through onhand Regression Method
dfs=pd.DataFrame(regr.predict(df))
datatable=pd.concat([simid,date,dfs],axis=1)
datatable.columns=['simid','Year','Total_Demand(Tonnes)']
preddata=pd.concat([datatable,preddata],axis=0)
datas=list()
#Geting Data With Respective Dates
# print(datatable)
for row in dates:
# print(dates)
datas.extend(datatable.loc[datatable['Year'] ==row, 'Total_Demand(Tonnes)'])
kkk=pd.DataFrame(datas)
me=ME(Fetchdata,kkk)
mae=MAE(Fetchdata,kkk)
mape=MAPE(Fetchdata,kkk)
dfe=pd.DataFrame([me,mae,mape],index=['ME','MAE','MAPE']).T
Errormsr=pd.concat([Errormsr,dfe],axis=0).reset_index(drop=True)
return preddata,simdata,Errormsr
preddata,simdata,Errormsr=sim(itr,data,sfrm,sto,atrtable,**kwargs)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
preddata.to_sql(con=engine, name='predicteddata',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
simdata.to_sql(con=engine2, name='simulationdata',index=False, if_exists='replace')
con.commit()
engine3 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Errormsr.to_sql(con=engine3, name='simerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `simerror`")
sdata = cnr.fetchall()
simerror = pd.DataFrame(sdata)
con.close()
return render_template('ysimulation.html',sayy=1,simerror=simerror.to_html(index=False))
return render_template('ysimulation.html')
##PROCURMENT PLANNING
@app.route('/procurementplanning')
def procurementplanning():
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerate", methods=['GET','POST'])
def criteriagenerate():
if request.method == 'POST':
global cnmbr
global vnmbr
cnmbr = int(request.form['cnmbr'])
vnmbr = int(request.form['vnmbr'])
if cnmbr == 0 or vnmbr==0:
return render_template('criterianumberask.html',warning='Data Invalid')
cmainlist=[]
global cnames
cnames = []
for i in range (1,cnmbr+1):
lst=[]
name='cname'+str(i)
lst.append(i)
lst.append(name)
cmainlist.append(lst)
cnames.append(name)
vmainlist=[]
global vnames
vnames = []
for i in range (1,vnmbr+1):
lst=[]
name='vname'+str(i)
lst.append(i)
lst.append(name)
vmainlist.append(lst)
vnames.append(name)
return render_template('vendorselection_criteriagenerate.html',cmainlist=cmainlist,vmainlist=vmainlist)
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerated", methods=['GET','POST'])
def criteriagenerated():
if request.method == 'POST':
global criterianames
criterianames=[]
for name in cnames:
criterianame = request.form[name]
criterianames.append(criterianame)
global vendornames
vendornames=[]
for name in vnames:
vendorname = request.form[name]
vendornames.append(vendorname)
mcrlst=[]
cn=len(criterianames)
k=1
global maincriteriaoption
maincriteriaoption=[]
global maincritriacri
maincritriacri=[]
for i in range(cn-1):
for j in range (i+1,cn):
cri='criteriaorder'+str(k)
opt='coption'+str(k)
crlst=[k,cri,criterianames[i],criterianames[j],opt]
mcrlst.append(crlst)
k=k+1
maincriteriaoption.append(opt)
maincritriacri.append(cri)
mvrlst=[]
vn=len(vendornames)
k=1
global mainvendoroption
mainvendoroption=[]
global mainvendorcri
mainvendorcri=[]
for z in criterianames:
mvrlst1=[]
vcri=[]
vopt=[]
for i in range(vn-1):
for j in range (i+1,vn):
cri='vendororder'+z+str(k)
opt='voption'+z+str(k)
vrlst=[k,cri,vendornames[i],vendornames[j],opt]
mvrlst1.append(vrlst)
k=k+1
vcri.append(cri)
vopt.append(opt)
mvrlst.append(mvrlst1)
mainvendorcri.append(vcri)
mainvendoroption.append(vopt)
return render_template('vendorselection_maincriteria.html',mcrlst=mcrlst,mvrlst=mvrlst)
return render_template('vendorselection_criteriagenerated.html')
def tablecreator(imp,val,crit):
n=len(imp)
for i in range(n):
if imp[i]==1:
val[i]=float(1/val[i])
fdata=pd.DataFrame(columns=[crit],index=[crit])
i=0
k=0
for index in fdata.index:
j=0
for columns in fdata.columns:
if i==j:
fdata[index][columns]=1
if i<j:
fdata[index][columns]=round((float(val[k])),2)
fdata[columns][index]=round((1/val[k]),2)
k=k+1
j=j+1
i=i+1
return fdata
@app.route("/criteriaread", methods=['GET','POST'])
def criteriaread():
if request.method == 'POST':
importances = []
values = []
for name1 in maincritriacri:
imp = int(request.form[name1])
importances.append(imp)
for name2 in maincriteriaoption:
val = int(request.form[name2])
values.append(val)
#global maincriteriadata
maincriteriadata=tablecreator(importances,values,criterianames)
mainimportances=[]
for crioption in mainvendorcri:
importance=[]
for option1 in crioption:
impc = int(request.form[option1])
importance.append(impc)
mainimportances.append(importance)
mainvalues=[]
for vendoroption in mainvendoroption:
vvalues=[]
for option2 in vendoroption:
valuev = int(request.form[option2])
vvalues.append(valuev)
mainvalues.append(vvalues)
maindf=[]
for z in range(len(criterianames)):
df=tablecreator(mainimportances[z],mainvalues[z],vendornames)
maindf.append(df)
dictmain={'crit':maincriteriadata}
names=criterianames
dfs=maindf
dictionary=dict((n,d) for (n,d) in zip(names,dfs))
def ahpmain(dictmain):
global wt_Crit
wt_Crit=[]
key=[]
key=list(dictmain.keys())
for i in key:
Crit=np.dot(dictmain[i],dictmain[i])
row_sum=[]
for j in range(len(Crit)):
row_sum.append(sum(Crit[j]))
wt_Crit.append([s/sum(row_sum) for s in row_sum])
Crit=[]
return wt_Crit
def ahp(dictmain,dictionary):
global output
main= ahpmain(dictmain)
submain= ahpmain(dictionary)
dd=pd.DataFrame(submain).T
df=pd.DataFrame(main).T
output=np.dot(dd,df)
return output,dd
yaxis,dd=ahp(dictmain,dictionary)
yax=pd.DataFrame(yaxis,index=vendornames,columns=['Score']).sort_values('Score',ascending=False).T
ynames=yax.columns
yval=yax.T.values
dd.index=vendornames
dd.columns=names
dd=dd.T
opq23=[]
for column in dd.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in dd[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
return render_template('vendorselection_ahp_final_output.html',ynames=ynames,yval=yval,dd=opq23,names=names)
return render_template('vendorselection_criteriagenerated.html')
#DETERMINISTIC STARTS
@app.route("/spt")
def spt():
return render_template('SinglePeriod.html')
@app.route("/ppbreak")
def ppbreak():
return render_template('pbreak.html')
@app.route('/pbrk', methods=['GET','POST'])
def pbrk():
return render_template('pbrk.html')
@app.route('/eoq', methods=['GET','POST'])
def eoq():
##setUpCost::setting up cost prior(>>setUpCost;<<moving rate)
AnnulaUnitsDemand=100##purchase demand of product per year
FixedCost=500 ##cost fixed for the product
AnnHoldingcost=0.25 ##remaining goods cost
UnitCost=445 ##purchasing cost
LeadTime=10 ##time b/w initiation and completion of a production process.
SafetyStock=100##extra stock
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
FixedCost=float(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=float(UnitCost)
LeadTime=float(LeadTime)
SafetyStock=float(SafetyStock)
sgap=1
pgap=1
HoldingCost=AnnHoldingcost*UnitCost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=round((EOQ*.75),0)
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totHoldCost+totOrderCost)
while (count < (EOQ*2)):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
# sstock=int(math.sqrt((LeadTime^2)+(int(ReorderPoint)^2)))
return render_template('eoq.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock)
########################EEEEppppppppppQQQQQQ############
########################EEEEppppppppppQQQQQQ############
@app.route('/eproduction', methods=['GET','POST'])
def eproduction():
AnnulaUnitsDemand=100
Prodrate=125
FixedCost=500
AnnHoldingcost=0.1
UnitCost=25000
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
Prodrate=request.form['Prodrate']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
Prodrate=int(Prodrate)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
if(Prodrate<=AnnulaUnitsDemand):
return render_template('eproduction.html',warning='Production date should not be least than Annual Demand',
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
pgap=round((1-(AnnulaUnitsDemand/Prodrate)),2)
HoldingCost=float(AnnHoldingcost*UnitCost)
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totOrderCost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eproduction.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
######################EEEEppppppppppQQQQQQ############
######################EEEEppppppppppQQQQQQ############
@app.route('/eoq_backorders', methods=['GET','POST'])
def eoq_backorders():
AnnulaUnitsDemand=12000
shortcost=1.1
FixedCost=8000
AnnHoldingcost=0.3
UnitCost=1
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
shortcost=request.form['shortcost']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
shortcost=int(shortcost)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
HoldingCost=float(AnnHoldingcost*UnitCost)
sgap=(shortcost+HoldingCost)/shortcost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/HoldingCost))*(math.sqrt(sgap)),2)
REOQ=round(math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost)*sgap),0)
totbackorder=EOQ*(HoldingCost/(shortcost+HoldingCost))
totOrderCost=round(((FixedCost*AnnulaUnitsDemand)/EOQ),2)
totHoldCost=round(((HoldingCost*((EOQ-totbackorder)**2))/(2*EOQ)),2)
totshortcost=round((shortcost*(totbackorder**2)/(2*EOQ)),2)
TotalCost=round((totOrderCost+totHoldCost+totshortcost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count= EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
shlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
shlist.append(totshortcost)
tclist.append(totOrderCost+totshortcost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eoq_backorders.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
shlist=shlist,sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,shortcost=shortcost,
LeadTime=LeadTime,SafetyStock=SafetyStock)
#################pbreak######################
@app.route("/pbreak_insert", methods=['GET','POST'])
def pbreak_insert():
if request.method == 'POST':
quantity = request.form.getlist("quantity[]")
price = request.form.getlist("price[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("CREATE TABLE IF NOT EXISTS `pbreaktable` (quantity int(8),price int(8))")
curr.execute("DELETE FROM `pbreaktable`")
conn.commit()
say=1
for i in range(len(quantity)):
quantity_clean = quantity[i]
price_clean = price[i]
if quantity_clean and price_clean:
curr.execute("INSERT INTO `pbreaktable`(`quantity`,`price`) VALUES('"+quantity_clean+"','"+price_clean+"')")
conn.commit()
else:
say=0
if say==0:
message="Some values were not inserted!"
else:
message="All values were inserted!"
return(message)
@app.route('/view', methods=['GET','POST'])
def view():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
return render_template('pbrk.html',username=username,ress =ress.to_html())
@app.route('/pbreakcalculate', methods=['GET','POST'])
def pbreakcalculate():
AnnulaUnitsDemand=10
FixedCost=1
AnnHoldingcost=0.1
UnitCost=445
LeadTime=10
SafetyStock=100
if request.method == 'POST':
if request.form['AnnulaUnitsDemand']:
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
if request.form['FixedCost']:
FixedCost=request.form['FixedCost']
FixedCost=float(FixedCost)
if request.form['AnnHoldingcost']:
AnnHoldingcost=request.form['AnnHoldingcost']
AnnHoldingcost=float(AnnHoldingcost)
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
conn.close()
datatable=pd.DataFrame(columns=['Quantity','Price','EOQ','TotalCost'])
mainlist=[]
Qu=ress['quantity']
Qm=0
for index, i in ress.iterrows():
tcl=[]
quantity = i['quantity']
price = i['price']
HoldingCost1=AnnHoldingcost*price
eoq1=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost1))),2)
REOQ=round(eoq1,0)
totOrderCost1=round((FixedCost*AnnulaUnitsDemand/eoq1),2)
totHoldCost1=round(((HoldingCost1*eoq1)/2),2)
totalcost1=float(round((totOrderCost1+totHoldCost1),2))
lst=[quantity,price,eoq1,totalcost1]
a=pd.DataFrame(lst).T
a.columns=['Quantity','Price','EOQ','TotalCost']
datatable=pd.concat([datatable,a],ignore_index=True)
name='TotalCost (Price='+str(a['Price'][0])+')'
tcl.append(name)
Qmin=1
Qmax=Qu[Qm]
qtylist2=[]
tclist1=[]
while (Qmin < Qmax):
qtylist2.append(Qmin)
tclist1.append(round((Qmin/2*totHoldCost1+AnnulaUnitsDemand/Qmin*FixedCost),2))
Qmin +=2
Qmin=Qmax+1
qtylist2.append(eoq1)
tclist1.append(totalcost1)
tcl.append(tclist1)
mainlist.append(tcl)
Eu=datatable['EOQ']
Qu=datatable['Quantity']
Tu=datatable['TotalCost']
minlst=[]
for i in range(len(Eu)):
if i ==0:
if Eu[i]<=Qu[i]:
minlst.append(i)
else:
if Eu[i]<=Qu[i] and Eu[i]>Qu[i-1]:
minlst.append(i)
if len(minlst)==0:
minnval='Solution not feasible'
else:
minval=Tu[minlst[0]]
minnval=Eu[minlst[0]]
for j in minlst:
if Tu[j]<minval:
minval=Tu[j]
minnval=Eu[j]
val1=0
for i in range(len(tclist1)):
if (round(minnval))==qtylist2[i]:
val1=i
minival=round(minval)
minnival=round(minnval)
NumOrders=round((AnnulaUnitsDemand/minnval),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
return render_template('pbreak.html',
NumOrders=NumOrders,OrderTime=OrderTime,REOQ=REOQ,ReorderPoint=ReorderPoint,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock,minnval=minnval,minval=minval,minival=minival,minnival=minnival,
datatable=datatable.to_html(index=False),mainlist=mainlist,
val1=val1,tclist1=tclist1,qtylist2=qtylist2)
#################Demand problalstic######################
@app.route('/demand', methods=['GET', 'POST'])
def demand():
cost=10
price=12
salvage=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
cdf=[]
sum=0
for row in data['Prob']:
sum=sum+row
cdf.append(sum)
cumm_freq=(pd.DataFrame(cdf)).values##y-axis
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
k=[row>CSl for row in cumm_freq]
count=1
for row in k:
if row==False:
count=count+1
demand=(data['Demand']).values
w=data['Demand'].loc[count]##line across x-axis
val=0
for i in range(len(cumm_freq)):
if(w==demand[i]):
val=i
return render_template('demand.html',cost=cost,price=price,salvage=salvage,
cumm_freq=cumm_freq,demand=demand,val=val)
@app.route('/normal', methods=['GET', 'POST'])
def normal():
cost=10
price=12
salvage=9
sd=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost1=cost-salvage
undercost1=price-cost
CSl=undercost1/(undercost1+overcost1)
zz=st.norm.ppf(CSl)##x-line
z=float(format(zz, '.2f'))
# Expecteddemand=round(mea+(z*sd))
mean = 0; sd = 1; variance = np.square(sd)
x = np.arange(-4,4,.01)##x-axis
f =(np.exp(-np.square(x-mean)/2*variance)/(np.sqrt(2*np.pi*variance)))##y-axis
val=0
for i in range(len(f)):
if(z==round((x[i]),2)):
val=i
return render_template('normal.html',x=x,f=f,val=val,cost=cost,price=price,salvage=salvage)
@app.route('/utype', methods=['GET','POST'])
def utype():
cost=10
price=12
salvage=2
mini=1
maxi=10
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
mini=request.form['mini']
maxi=request.form['maxi']
cost=int(cost)
price=int(price)
salvage=int(salvage)
mini=int(mini)
maxi=int(maxi)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
expdemand1=round(mini+((maxi-mini)*CSl))
# a=[mini,0]
# b=[mini,100]
# c=[maxi,0]
# d=[maxi,100]
# width = c[0] - b[0]
# height = d[1] - a[1]
lims = np.arange(0,maxi,1)
val=0
for i in range(len(lims)):
if(expdemand1==lims[i]):
val=i
return render_template('utype.html',x=lims,f=lims,val=val,cost=cost,price=price,salvage=salvage,mini=mini,maxi=maxi)
@app.route('/outputx', methods=['GET', 'POST'])
def outputx():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM `abc`")
all_data = cur.fetchall()
all_data = pd.DataFrame(all_data)
A_ccat=.8
B_ccat=.95
A_ucat=.1
B_ucat=.25
tot_cost=all_data['Cost'].sum()
tot_usage=all_data['Annual Usage'].sum()
all_data['perc_cost']=all_data['Cost']/tot_cost
all_data['perc_usage']=all_data['Annual Usage']/tot_usage
all_data.sort_values(by=['perc_cost'], inplace=True, ascending=False)
sort_data=all_data.reset_index()
sort_data['cum_cperc']=np.nan
sort_data['cum_uperc']=np.nan
sort_data['Class']=''
for i in range(len(sort_data)):
if(i==0):
sort_data.set_value(i, 'cum_cperc', sort_data['perc_cost'][i])
sort_data.set_value(i, 'cum_uperc', sort_data['perc_usage'][i])
# cperc_data.append(all_data['perc_cost'][i])
sort_data.set_value(i,'Class','A')
else:
sort_data.set_value(i, 'cum_cperc', sort_data['perc_cost'][i]+sort_data['cum_cperc'][i-1])
sort_data.set_value(i, 'cum_uperc', sort_data['perc_usage'][i]+sort_data['cum_uperc'][i-1])
if(sort_data['cum_cperc'][i]<=A_ccat and sort_data['cum_uperc'][i]<=A_ucat):
sort_data.set_value(i,'Class','A')
elif(sort_data['cum_cperc'][i]<=B_ccat and sort_data['cum_uperc'][i]<=B_ucat):
sort_data.set_value(i,'Class','B')
else:
sort_data.set_value(i,'Class','C')
x7=sort_data[['cum_cperc']]
x1=x7*100
x3=np.round(x1)
x2=np.array([])
x5 = np.append(x2,x3)
y7= sort_data[['cum_uperc']]
y1=y7*100
y3=np.round(y1)
y2=np.array([])
y5 = np.append(y2,y3)
###############% of Total cost//
a= sort_data[(sort_data['Class']=='A')][['perc_cost']]
j=a.sum()
k=j*100
pd.DataFrame(k)
kf=k[0]
b= sort_data[(sort_data['Class']=='B')][['perc_cost']]
n=b.sum()
m=n*100
pd.DataFrame(m)
mf=m[0]
c= sort_data[(sort_data['Class']=='C')][['perc_cost']]
o=c.sum()
p=o*100
pd.DataFrame(p)
pf=p[0]
tes=k,m,p
t2 = np.array([])
te2 = np.append(t2,tes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f= sort_data[(sort_data['Class']=='A')][['Product number']]
v=f.sum()
pd.DataFrame(v)
vif=v[0]
f1= sort_data[(sort_data['Class']=='B')][['Product number']]
u=f1.sum()
pd.DataFrame(u)
uif=u[0]
f2= sort_data[(sort_data['Class']=='C')][['Product number']]
vf=f2.sum()
pd.DataFrame(vf)
kif=vf[0]
#################% of Total units // Annual Usage
t= sort_data[(sort_data['Class']=='A')][['perc_usage']]
i=t.sum()
p1=i*100
pd.DataFrame(p1)
nf=p1[0]
l= sort_data[(sort_data['Class']=='B')][['perc_usage']]
t=l.sum()
q1=t*100
| pd.DataFrame(q1) | pandas.DataFrame |
import json
import pandas as pd
from vvc.utils import json_utils
def to_df(json_file):
count_summary = {}
time_summary = {}
with open(json_file) as json_data:
data = json.load(json_data)
for frame_id, objects in data['frames'].items():
# Extract counts
if frame_id not in count_summary:
count_summary[frame_id] = {}
for obj in objects['objects']:
tag = obj['tag']
if tag not in count_summary[frame_id]:
count_summary[frame_id][tag] = 0
count_summary[frame_id][tag] += 1
# Extract running time
if frame_id not in time_summary:
time_summary[frame_id] = {}
for key, value in objects['timestamps'].items():
time_summary[frame_id][key] = value
df = pd.DataFrame.from_dict(count_summary, orient='index')
df = df.fillna(0)
df = df.set_index(pd.to_numeric(df.index))
df = df.sort_index(kind='mergesort')
df = df.reindex(sorted(df.columns), axis=1)
exp = pd.DataFrame()
# Set the values for each perspective
for column in df.columns:
for fb_side in ['front', 'back']:
for lr_side in ['left', 'right']:
tag = column # + '_' + fb_side + '_' + lr_side
exp[tag] = df[column]
exp = exp.sort_index(kind='mergesort')
times = | pd.DataFrame.from_dict(time_summary, orient='index') | pandas.DataFrame.from_dict |
import os
import copy
import numpy as np
import pandas as pd
import itertools
from tqdm import tqdm
from abc import ABC, abstractmethod
from collections.abc import Iterable, Mapping
from sklearn.model_selection import KFold, GroupKFold, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
from .stage_base import StageBase
from .pipeline import Pipeline
from ..models.model import ModelBase, PyTorchModel, SklearnModel, TensorFlowModel
from .load_data import DataFrameLoaderStage
from .preprocessing import PreprocessingStageBase
from .training_stage import ModelTrainingStage, SupervisedTrainingContext
from .evaluation_stage import EvaluationStage, SupervisedEvaluationContext
from .prediction_stage import ModelPredictionStage, PredictionContext
from ..utils.utils import get_sklearn_scoring_func, get_tensorflow_loss_func, get_torch_loss_func, get_any_scoring_func
from ..log.logger import Logger
class GenerateCVFoldsStage(StageBase):
def __init__(self, strategy, strategy_args):
super().__init__()
self._strategy = strategy.lower()
self._strategy_args = strategy_args
@classmethod
def _random_kfold(cls, data, args):
if 'num_folds' not in args.keys():
raise ValueError("{} with 'random' strategy must provide a 'num_folds' strategy arg".format(type(cls).__name__))
if 'seed' not in args.keys():
raise ValueError("{} with 'random' strategy must provide a 'seed' strategy arg".format(type(cls).__name__))
kf = KFold(n_splits=args['num_folds'], shuffle=True, random_state=args['seed'])
return [x for x in kf.split(data)]
@classmethod
def _random_grouped_kfold(cls, data, args):
if 'num_folds' not in args.keys():
raise ValueError("{} with 'random_grouped' strategy must provide a 'num_folds' strategy arg".format(type(cls).__name__))
if 'seed' not in args.keys():
raise ValueError("{} with 'random_grouped' strategy must provide a 'seed' strategy arg".format(type(cls).__name__))
if 'group_by' not in args.keys():
raise ValueError("{} with 'random_grouped' strategy must provide a 'group_by' strategy arg".format(type(cls).__name__))
data = shuffle(data, random_state=args['seed']).reset_index(drop=True) # Shuffle the data first because GroupKFold doesn't support it
group_le = LabelEncoder()
groups = group_le.fit_transform(data[args['group_by']].values)
Logger.getInst().info("Shuffling and grouping data in folds by column: {}, found {} groups".format(args['group_by'], len(group_le.classes_)))
gkf = GroupKFold(n_splits=args['num_folds'])
return [x for x in gkf.split(data, groups=groups)]
@classmethod
def _stratified_kfold(cls, data, args):
if 'num_folds' not in args.keys():
raise ValueError("{} with 'stratified' strategy must provide a 'num_folds' strategy arg".format(type(cls).__name__))
if 'percentile_bins' not in args.keys() and 'bin_edges' not in args.keys():
raise ValueError("{} with 'stratified' strategy must provide either a 'percentile_bins' or 'bin_edges' strategy arg".format(type(cls).__name__))
if 'seed' not in args.keys():
Logger.getInst().info("{} with 'stratified' strategy can randomly shuffle the data if a 'seed' integer strategy arg is provided. No shuffling used".format(type(cls).__name__))
if 'stratify_on' not in args.keys():
raise ValueError("{} with 'stratified' strategy must provide a 'stratify_on' strategy arg".format(type(cls).__name__))
if 'seed' in args.keys():
skf = StratifiedKFold(n_splits=args['num_folds'], shuffle=True, random_state=args['seed'])
else:
skf = StratifiedKFold(n_splits=args['num_folds'])
if 'percentile_bins' in args.keys():
y = pd.qcut(data[args['stratify_on']].argsort(kind='stable'), q=args['percentile_bins'], labels=range(args['percentile_bins'])).tolist()
else:
y = np.digitize(data[args['stratify_on']], bins=args['bin_edges'])
return list(skf.split(data, y))
@classmethod
def _stratified_grouped_kfold(cls, data, args):
if 'num_folds' not in args.keys():
raise ValueError("{} with 'stratified_grouped' strategy must provide a 'num_folds' strategy arg".format(type(cls).__name__))
if 'percentile_bins' not in args.keys() and 'bin_edges' not in args.keys():
raise ValueError("{} with 'stratified_grouped' strategy must provide either a 'percentile_bins' or 'bin_edges' strategy arg".format(type(cls).__name__))
if 'seed' not in args.keys():
Logger.getInst().info("{} with 'stratified_grouped' strategy can randomly shuffle the data if a 'seed' integer strategy arg is provided. No shuffling used".format(type(cls).__name__))
if 'stratify_on' not in args.keys():
raise ValueError("{} with 'stratified_grouped' strategy must provide a 'stratify_on' strategy arg".format(type(cls).__name__))
if 'group_by' not in args.keys():
raise ValueError("{} with 'stratified_grouped' strategy must provide a 'group_by' strategy arg".format(type(cls).__name__))
group_le = LabelEncoder()
groups = group_le.fit_transform(data[args['group_by']].values)
grouped_data = data.groupby(by=groups, group_keys=True).mean()
grouped_stratify_on = | pd.Series(data[args['stratify_on']]) | pandas.Series |
from functools import partial
from pathlib import Path
import multiprocessing
import glob
import tqdm
import pandas as pd
import numpy as np
import torch
import torchaudio
# fastai2_audio
# add flac to supported audio types
import mimetypes
mimetypes.types_map[".flac"] = "audio/flac"
from fastai2_audio.core.all import get_audio_files
from libreasr.lib.utils import sanitize_str
PRINT_DROP = False
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def save(df, path, print_fun=print):
df.to_csv(path, index=False)
print_fun(f"df saved to {path}")
def process_one(file, get_labels):
rows = []
try:
if file.suffix == ".m4a":
raise Exception("no audio file")
aud, sr = torchaudio.load(file)
assert aud.size(0) >= 1 and aud.size(1) >= 1
xlen = int((aud.size(1) / float(sr)) * 1000.0)
labels = get_labels(file, duration=xlen)
for (xstart, spanlen, label, ylen) in labels:
if ylen >= 2:
bad = False
else:
bad = True
if spanlen == -1:
spanlen = xlen
rows.append((str(file.absolute()), xstart, spanlen, label, ylen, sr, bad))
except Exception as e:
pass
finally:
if len(rows) == 0:
xstart, xlen, label, ylen, sr, bad = 0, 0, "", 0, -1, True
rows.append((str(file.absolute()), xstart, xlen, label, ylen, sr, bad))
return rows
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("path", type=str, help="path to the dataset")
parser.add_argument(
"dataset",
type=str,
help="which dataset: common-voice | tatoeba | tf-speech | librispeech | yt",
)
parser.add_argument(
"--workers", default=1, type=int, help="how many pool workers to create"
)
parser.add_argument(
"--block-size",
default=2,
type=int,
help="in case of vtt format, how many sentences to collect together (min: 2)",
)
parser.add_argument(
"--save-every-pcent",
default=5,
type=int,
help="save resulting df every N% of all files",
)
parser.add_argument(
"--print-every-pcent",
default=5,
type=int,
help="print info every N% of all files",
)
parser.add_argument(
"--lang", default="en", type=str, help="language",
)
parser.add_argument(
"--out",
default="asr-dataset.csv",
type=str,
help="name of the resulting csv file",
)
parser.add_argument(
"--soundfile",
type=str2bool,
const=True,
default=False,
nargs="?",
help="use torchaudio soundfile implementation",
)
args = parser.parse_args()
if args.soundfile:
torchaudio.set_audio_backend("soundfile")
path = Path(args.path)
dataset = args.dataset
p = path
save_path = path / args.out
# create df
# see if exists
cols = [
"file",
"xstart",
"xlen",
"label",
"ylen",
"sr",
"bad",
]
if save_path.exists():
df = pd.read_csv(save_path)
print(f"> df restored from {save_path}")
else:
df = pd.DataFrame(columns=cols)
print("> df NOT restored (not found?)")
# grab all audio files
files = get_audio_files(p)
print("> raw files:", len(files))
# filter out files that are already in the df
files = pd.Series([str(x) for x in files])
res = files.isin(df.file)
files = [Path(x) for x in files[~res].tolist()]
print("> filtered files:", len(files))
# get_labels for each dataset format
if dataset == "common-voice":
label_df = pd.read_csv(path / "validated.tsv", delimiter="\t")
def get_labels(file, **kwargs):
n = file.stem + ".mp3"
l = label_df[label_df.path == n].sentence.iloc[0]
return [(0, -1, l, len(l))]
elif dataset == "tatoeba":
fname = glob.glob(str(path) + "/dataset_*.csv")[0]
label_df = | pd.read_csv(fname) | pandas.read_csv |
"""
<NAME>
<EMAIL>
<EMAIL>
"""
"""
This is used to generate images containing data from a Slifer Lab NMR cooldown.
The NMR analysis toolsuite produces a file called "global_analysis.csv" which this program needs
in tandem with the raw DAQ .csv to form an image sequence that captures the cooldown datastream.
"""
import pandas, os, numpy, multiprocessing, numpy, time, matplotlib, sys
from matplotlib import pyplot as plt
sys.path.insert(1, '..')
import variablenames
# Sept 14 2020 data
"""
rootdir = "../datasets/sep_2020/data_record_9-14-2020/video/"
flist = ['data_record_9-14-2020_abridged.csv', 'data_record_9-15-2020_abridged.csv']
daqdatafile = ["../datasets/sep_2020/rawdata/"+i for i in flist]
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+"saveme_9_14.csv"
spline_df_location = rootdir+"spline_df_saved_9_14.csv"
rawsig_ym, rawsig_YM = -4, 4
fitsub_xm, fitsub_XM = 32.4,33.4
fitsub_ym, fitsub_YM= -.2, 1.5
poor_fit_ym, poor_fit_YM = -1.6,-.8
"""
# Dec 3 2020 Data
rootdir = "../datasets/dec_2020/data_record_12-3-2020/"
daqdatafile = '../datasets/dec_2020/rawdata/data_record_12-3-2020_abriged.csv'
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_3_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.55,33.45
fitsub_ym, fitsub_YM = -.4, .2
rawsig_ym, rawsig_YM = -2, 2
poor_fit_ym, poor_fit_YM = -1,1
# Dec 4 2020 Data
"""
rootdir = "../datasets/dec_2020/data_record_12-4-2020/analysis/polarization/"
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_4_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.55,33.45
fitsub_ym, fitsub_YM = -.075, .075
rawsig_ym, rawsig_YM = -2, 2
poor_fit_ym, poor_fit_YM = -1,1
"""
# Dec 7 2020
"""
rootdir = "../datasets/dec_2020/data_record_12-7-2020/analysis/Enhanced/"
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Fit 1 Subtraction'
karlmethod = rootdir+'saveme_12_7_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.85,32.55
fitsub_ym, fitsub_YM = -.3, .2
rawsig_ym, rawsig_YM = -2, 2
poor_fit_ym, poor_fit_YM = -.6,-.1
"""
# Dec 8 2020
"""rootdir = "../datasets/dec_2020/data_record_12-8-2020/analysis/enhanced/"
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_8_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 212.45,212.94
fitsub_ym, fitsub_YM = -.01, .02
rawsig_ym, rawsig_YM = -.3, .3
poor_fit_ym, poor_fit_YM = -.018,-.01
"""
# Dec 9 2020
"""
rootdir = "../datasets/dec_2020/data_record_12-9-2020/video/"
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_9_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 212.45,212.94
fitsub_ym, fitsub_YM = -.01, .02
rawsig_ym, rawsig_YM = -.3, .3
poor_fit_ym, poor_fit_YM = -.018,-.01
poor_fit_ym, poor_fit_YM = -.005,-.03
"""
# Dec 10 2020 data
"""
csvdirectory = "../datasets/dec_2020/data_record_12-10-2020/video_analysis/graph_data/"
globalcsv2 = "../datasets/dec_2020/data_record_12-10-2020/video_analysis/global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = 'datasets/2020_12_10/saveme_12_10_20.csv'
spline_df_location = 'datasets/2020_12_10/spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 11 2020 data
"""
rootdir = "../datasets/dec_2020/data_record_12-11-2020/video/"
daqdatafile = "../datasets/dec_2020/rawdata/data_record_12-11-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_9_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Sept 12 2020 data
"""
rootdir = "../datasets/sep_2020/data_record_9-12-2020/video/"
daqdatafile = "../datasets/sep_2020/rawdata/data_record_9-12-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_12_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 11 2020 data
"""
rootdir = "../datasets/sep_2020/data_record_9-11-2020/video/"
daqdatafile = ["../datasets/sep_2020/rawdata/data_record_9-11-2020_abridged.csv", "../datasets/sep_2020/rawdata/data_record_9-10-2020_abridged.csv"]
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2_redo.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_12_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 12 2020 data #FOR TE FIXING
"""
rootdir = "../datasets/sep_2020/data_record_9-12-2020_old_analysis/TE/"
daqdatafile = "../datasets/sep_2020/rawdata/data_record_9-12-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = "../datasets/sep_2020/data_record_9-12-2020_old_analysis/TE/912_536pTE/global_analysis.csv"
globalcsv2 = "../datasets/sep_2020/data_record_9-12-2020_old_analysis/TE/912_536pTE/global_analysis_with_extra_stuff.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_12_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 11 2020 data
"""
rootdir = "../datasets/sep_2020/data_record_9-11-2020/video/"
daqdatafile = "../datasets/sep_2020/rawdata/data_record_9-11-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_11_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.95,32.85
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 13 2020 Dat
"""
rootdir = "../datasets/sep_2020/data_record_9-13-2020/video/"
daqdatafile = "../datasets/sep_2020/rawdata/data_record_9-13-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_13_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.95,32.85
fitsub_ym, fitsub_YM = -.05, .5
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 15 2020 Data #enhanced
"""
rootdir = "../datasets/sep_2020/data_record_9-13-2020/video/"
daqdatafile = ["../datasets/sep_2020/rawdata/data_record_9-15-2020_abridged.csv", "../datasets/sep_2020/rawdata/data_record_9-14-2020_abridged.csv"]
csvdirectory = rootdir+"graph_data/"
globalcsv = "../datasets/sep_2020/data_record_9-15-2020/video/global_analysis.csv"
globalcsv2 = "../datasets/sep_2020/data_record_9-15-2020/video/global_analysis_long_fixed.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_13_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.95,32.85
fitsub_ym, fitsub_YM = -.05, .5
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 15 2020 Data #te
"""
rootdir = "../datasets/sep_2020/data_record_9-13-2020/video/"
daqdatafile = ["../datasets/sep_2020/rawdata/data_record_9-15-2020_abridged.csv", "../datasets/sep_2020/rawdata/data_record_9-14-2020_abridged.csv"]
csvdirectory = rootdir+"graph_data/"
globalcsv = "../datasets/sep_2020/data_record_9-14-2020_old_analysis/700pte/7p_lab/global_analysis.csv"
globalcsv2 = "../datasets/sep_2020/data_record_9-14-2020_old_analysis/700pte/7p_lab/global_analysis_long_fixed.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_13_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.95,32.85
fitsub_ym, fitsub_YM = -.05, .5
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4"""
# Dec 10 2020 Data null
"""
rootdir = "../datasets/dec_2020/data_record_12-10-2020/Complete_analysis/null_pure/"
daqdatafile = "../datasets/dec_2020/rawdata/data_record_12-10-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_10_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 10 2020 Data
"""
rootdir = "../datasets/dec_2020/data_record_12-10-2020/Complete_analysis/enhanced_pure/"
daqdatafile = "../datasets/dec_2020/rawdata/data_record_12-10-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_10_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 11 2020 Data
"""
rootdir = "../datasets/dec_2020/data_record_12-11-2020/video/"
daqdatafile = "../datasets/dec_2020/rawdata/data_record_12-11-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Fit 1 Subtraction'
karlmethod = rootdir+'saveme_12_11_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 19 2019 Data
"""
rootdir = "../datasets/dec_2019/vme_data/data_record_12-19-2019/"
daqdatafile = rootdir+"../rawdata/data_record_12-19-2019_abridged.csv"
csvdirectory = rootdir+"Results/enhanced/graph_data/"
globalcsv = rootdir+"enhanced_global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Fit 1 Subtraction'
karlmethod = rootdir+'saveme_12_19_19.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 212.7,213.15
fitsub_ym, fitsub_YM = -.02, .04
rawsig_ym, rawsig_YM = -.2, .2
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 21 2019 Data
rootdir = "../datasets/dec_2019/vme_data/data_record_12-21-2019/"
daqdatafile = rootdir+"../rawdata/data_record_12-21-2019_abridged.csv"
csvdirectory = rootdir+"results/enhanced/graph_data/"
globalcsv = rootdir+"enhanced_global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Fit 1 Subtraction'
karlmethod = rootdir+'saveme_12_19_19.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 211.7,212.5
fitsub_ym, fitsub_YM = -.02, .06
rawsig_ym, rawsig_YM = -.2, .2
poor_fit_ym, poor_fit_YM = -.5,0
dump = "../dump3/"
thermistor1 ="CCS.F11 (K)"
thermistor2 ="CCS.F10 (K)"
thermistor3 ="CCX.T1 (K)"
thermistor4 ="CX.T2 (K)"
NMR_Variable = "Diode Tune (V)"
NMR_Tune = 'Phase Tune (V)'
NMR_Performance_Metric = NMR_Variable#'IFOFF (V)'
columns_to_absolutely_save = [thermistor1, NMR_Tune, NMR_Variable, thermistor2,
thermistor3, "UCA Voltage (V)", "Mmwaves Frequency (GHz)",
thermistor4]
raw = "Potential (V)"
x = "MHz"
bl = "BL Potential (V)"
def forkitindexer(filelist):
"""
Return a list of tuples of indecies that divide the passed
list into almost equal slices
"""
p = int(8*multiprocessing.cpu_count()/10)
lenset = len(filelist)
modulus = int(lenset%p)
floordiv = int(lenset/p)
slicer = [[floordiv*i, floordiv*(i+1)] for i in range(p-1)]
slicer.append([floordiv*(p-1), p*floordiv+int(modulus)-1])
return slicer
def plotter(files, indexes, times, ga_fixed, id_num, deltas, timesteps, deltastime, tkbackend):
deltasx = 'time'
deltasy = 'sum'
if tkbackend == 'on':
pass
elif tkbackend == 'off':
matplotlib.use('Agg')
s,f = indexes
todo = files[s:f]
timedeltas = []
for i, val in enumerate(todo):
t1 = time.time()
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(16, 8), constrained_layout=True)
fig.suptitle(str(times[s+i]))
with open(csvdirectory+val, 'r') as f:
df = pandas.read_csv(f)
ss = ga_fixed.loc[times[s+i], 'sigstart']
sf = ga_fixed.loc[times[s+i], 'sigfinish']
signal_removed_df = df[(df[x]>ss) & (df[x]<sf)]
ax[0,0].scatter(df[x], df[yfitsub], label='Fit Subtracted Signal', color='blue')
ax[0,0].scatter(signal_removed_df[x], signal_removed_df[yfitsub], label='User Selected Region', color='red')
ax[0,0].legend(loc='best')
ax[0,0].set_title("Fit Subtracted Signal")
ax[0,0].set_ylabel('Volts (V)')
ax[0,0].set_xlabel('Frequency (MHz)')
ax[0,0].set_ylim(fitsub_ym, fitsub_YM)
ax[0,1].set_title('Temperature')
ax[0,1].scatter(ga_fixed.index.tolist(), ga_fixed[thermistor2], color='red', label=thermistor2)
ax[0,1].scatter(ga_fixed.index.tolist(), ga_fixed[thermistor3], color='orange', label=thermistor3)
ax[0,1].scatter(ga_fixed.index.tolist(), ga_fixed[thermistor1], color='green', label=thermistor1)
ax[0,1].scatter(ga_fixed.index.tolist(), ga_fixed[thermistor4], color='blue', label=thermistor4)
ax[0,1].set_ylim(-.5, 7)
ax[0,1].set_ylabel('Kelvin (K)')
ax[0,1].set_xlabel('Time')
ax[1,0].set_title("Raw Sweeps")
ax[1,0].scatter(df[x], df[bl], label='Baseline', color='blue')
ax[1,0].scatter(df[x], df[raw], label =''.join(list(val)[:-4]), color = 'red')
ax[1,0].set_ylim(rawsig_ym, rawsig_YM)
ax[1,0].set_ylabel('Volt')
ax[1,0].set_xlabel('Frequency (MHz)')
ax[1,1].scatter(ga_fixed.index.tolist(), ga_fixed['data_area'], color='green', label='Enhanced Data Area')
ax[1,1].set_title("Data Area")
#ax[1,1].set_ylim(-.025,.05)
ax[1,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], 'data_area'], color='magenta', label='Current sweep')
ax[1,1].set_ylabel('Volt-Area')
ax[1,1].set_xlabel('Time')
ax[0,2].scatter(ga_fixed.index.tolist(), ga_fixed[NMR_Performance_Metric], label=NMR_Performance_Metric)
ax[0,2].scatter(timesteps[s+i], ga_fixed.loc[times[s+i],NMR_Performance_Metric], color='magenta', label="Current Sweep")
ax[0,2].grid(True)
ax[0,2].legend(loc='best')
ax[0,2].set_title("VME & Microwave Stuff")
ax[0,2].set_ylabel('Volts (V)')
ax[0,2].set_xlabel('Time')
#ax[1,2].set_ylim(poor_fit_ym, poor_fit_YM)
ax[1,2].scatter(deltastime, deltas[deltasy], label="Signal-Wing avg value")
if timesteps[s+i] in deltastime:
ax[1,2].scatter(timesteps[s+i], deltas.loc[times[s+i], deltasy], color='magenta', label="Current Sweep")
ax[1,2].grid(True)
ax[1,2].legend(loc='best')
ax[1,2].set_title("Poor-fit indicator prototype")
ax[1,2].set_xlabel('Time')
ax[0,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], thermistor2], color='magenta', label="Current Sweep")
ax[0,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], thermistor3], color='blue')
ax[0,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], thermistor1], color='magenta')
ax[0,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], thermistor4], color='magenta')
ax[0,0].grid(True)
ax[1,0].grid(True)
ax[1,1].grid(True)
ax[0,1].grid(True)
ax[0,1].legend(loc='best')
ax[0,0].legend(loc='best')
ax[1,0].legend(loc='best')
ax[1,1].legend(loc='best')
plt.savefig(dump+str("{0:05d}".format(s+i)))
plt.clf()
plt.close('all')
t2 = time.time()
timedeltas.append(t2-t1)
print('ID:', id_num, ":", (i+1), "of", len(todo), '['+str(round((i+1)*100/len(todo),4))+'%]', "ETA: ", round((len(todo)-(i+1))*numpy.mean(timedeltas),1), 's')
def get_csv_files():
csvs = []
for root, dirs, files in os.walk(csvdirectory):
for f in files:
if f.endswith('.csv'):
csvs.append(''.join(list(f)[:-4])) # removes suffixes
return csvs
def get_global_analysis():
with open(globalcsv2, 'r') as f:
df = pandas.read_csv(f)
name = 'name'
# Set the indexer as the user-defined name of the analyzed instance
# which reflects the file names gotten in the function get_csv_files()
dffixed = df.set_index(name)
return dffixed
def sync_timestamps_with_csv_filenames(dffixed, csvs):
timesteps = []
keys = []
for i, index in enumerate(csvs):
try:
timesteps.append(dffixed.loc[index, 'time'])
keys.append(index+'.csv')
except KeyError as e:
print("Key error", e, "file exists, but no entry in global analysis.")
continue
corrected_DF = pandas.DataFrame(dict(zip(['keys', 'time'],[keys, timesteps])))
sorted_df = corrected_DF.sort_values(by='time')
return sorted_df
def cutter(ga_csv, sorted_df, tolerance):
import cutter as cutter2
minn = -.3
maxx=-.23
deltasx = 'time'
deltasy = 'sum'
deltasmin = 'spline min'
deltasmax = 'spline max'
edited = input("do you need to subsect (CUT) the data? [Y/N]: ")
edited = True if edited.upper() == 'Y' else False
if edited:
try:
with open(spline_df_location, 'r') as f:
deltas = pandas.read_csv(f)
except:
deltas = cutter2.main(tolerance=tolerance, neededpath=karlmethod, global_analysis=globalcsv2)
with open(spline_df_location, 'w') as f:
deltas.to_csv(f)
else:
with open(karlmethod, 'r') as f:
deltas = pandas.read_csv(f)
deltas = deltas.sort_values(by=deltasx)
deltas[deltasx] = pandas.to_datetime(deltas[deltasx],format="%Y-%m-%d %H:%M:%S")
ga_csv['time'] = pandas.to_datetime(ga_csv['time'], format="%Y-%m-%d %H:%M:%S")
asd = columns_to_absolutely_save
for i in asd:
ga_csv[i] = pandas.to_numeric(ga_csv[i], errors='coerce')
ga_csv.replace(to_replace='Off\n', value=dict(zip(asd,[numpy.nan for a in asd])), inplace=True)
ga_csv.replace(to_replace='Off', value=dict(zip(asd,[numpy.nan for a in asd])), inplace=True)
ga_csv = ga_csv.fillna(0)
ga_csv = ga_csv.sort_values(by='time')
# Sycronize indecies
deltas = deltas.set_index(deltasx)
ga_fixed = ga_csv.set_index('time')
sorted_df['time'] = pandas.to_datetime(sorted_df['time'],format="%Y-%m-%d %H:%M:%S")
sorted_df = sorted_df.set_index('time')
# Merge the dataframes
ga_fixed = ga_fixed.merge(deltas,left_index=True, right_index=True, how = 'right')
#ga_fixed = ga_fixed.merge(sorted_df,left_index=True, right_index=False, how = 'right')
ga_fixed = ga_fixed.join(sorted_df)
# Take "Cuts" (in python/R/SQL language: take a subset based on VALUE critera)
if edited:
deltas =deltas[(deltas[deltasy]>deltas[deltasmin])&(deltas[deltasy]<deltas[deltasmax])]
ga_fixed =ga_fixed[(ga_fixed[deltasy]>ga_fixed[deltasmin])&(ga_fixed[deltasy]<ga_fixed[deltasmax])]
deltastime = deltas.index.tolist()
timesteps = ga_fixed.index.tolist()
sorted_df = pandas.DataFrame({'time':timesteps, 'keys':ga_fixed['keys'].to_list()})
trashlist = []
for index,value in enumerate(ga_fixed['keys']):
try:
trashlist.append(''.join(list(value)[:-4]))
except:
trashlist.append('')
with open(rootdir+"global_analysis_cleaned.csv", 'w') as f:
ga_fixed.to_csv(f, columns=[])
else:
deltastime = deltas.index.tolist()
timesteps = ga_fixed.index.tolist()
return ga_fixed, deltas, timesteps, deltastime, sorted_df
def fetch_df(path, delimiter=','):
try:
with open(path, 'r') as f:
df = pandas.read_csv(f, delimiter=delimiter)
return df
except FileNotFoundError:
print("Can not find path")
def merger(primary_path:str, secondary_path:str, desired_columns:list, shared_column="time"):
"""
--> Primary path is the global analysis file produced by analyzing NMR spectra
--> Secondary path is the raw-data file recorded by the DAQ.
--> Desired columns is a list of columns that you want to MIGRATE from the
raw-data file INTO the global analysis file.
--> Shared column needs to be some form of timestamp.
"""
primary_df = fetch_df(primary_path)
print(primary_df)
primary_df[shared_column] = | pandas.to_datetime(primary_df[variablenames.vd_GA_timecol], format="%Y-%m-%d %H:%M:%S") | pandas.to_datetime |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
from sklearn.model_selection import train_test_split
import string
import nltk
from nltk.corpus import stopwords
plt.style.use(style='seaborn')
#%matplotlib inline
df=pd.read_csv('all-data.csv',encoding = "ISO-8859-1")
print(df.head())
y=df['Sentiment'].values
x=df['News Headline'].values
(x_train,x_test,y_train,y_test)=train_test_split(x,y,test_size=0.4)
# Train
df1=pd.DataFrame(x_train)
df1=df1.rename(columns={0:'news'})
df2=pd.DataFrame(y_train)
df2=df2.rename(columns={0:'sentiment'})
df_train=pd.concat([df1,df2],axis=1)
print(df_train.head())
# Test
df3= | pd.DataFrame(x_test) | pandas.DataFrame |
"""Test the enrichment of the entire dataset, or specific clusters against gene ontologies associated with complexes"""
import re
import os
import pandas as pd
import numpy as np
from collections import defaultdict
from scipy import stats
from utilities.database_map_and_filter import ortholog_map, uniprot_go_genes
from utilities.statistical_tests import fischers_test, apply_oneway_anova
from loguru import logger
from GEN_Utils import FileHandling
logger.info('Import OK')
cluster_path = f'results/lysate_denaturation/clustering/clustered.xlsx'
background_path = f'results/lysate_denaturation/normalised/normalised_summary.xlsx'
ontology_path = 'results/lysate_denaturation/gene_ontology_datasets/go_term_summary.xlsx'
size_path = 'results/lysate_denaturation/gene_ontology_datasets/size_summary.xlsx'
output_folder = 'results/lysate_denaturation/go_enrichment/'
resource_folder = 'resources/bioinformatics_databases/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# -----------------Read in standard components-----------------
# clustered data
clustered_data = pd.read_excel(f'{cluster_path}', sheet_name='summary')
clustered_data.drop([col for col in clustered_data.columns.tolist() if 'Unnamed: ' in col], axis=1, inplace=True)
# raw data as background
background_genes = pd.read_excel(f'{background_path}', sheet_name='raw')['Proteins'].unique()
# read in ontologies
ontology_genes = | pd.read_excel(f'{ontology_path}') | pandas.read_excel |
#can choose to import in global namespace
from classes import INSTINCT_process,Split_process,SplitRun_process,Unify_process,INSTINCT_userprocess
from getglobals import PARAMSET_GLOBALS
from misc import get_param_names
from .misc import file_peek,get_difftime
import hashlib
import pandas as pd
import os
from pipe_shapes import *
from .pipe_shapes import *
#custom modification of process to hash files (I use this for FormatFG and FormatGT)
#############
#this will load in the attributes that are shared by both INSTINCT processes and jobs.
class HashableFile:
def getfilehash(self):
if self.__class__.__name__=="FormatFG":
path = PARAMSET_GLOBALS['project_root']+ "lib/user/Data/FileGroups/" + self.parameters['file_groupID']
elif self.__class__.__name__=="FormatGT":
dirpath = PARAMSET_GLOBALS['project_root']+ "lib/user/Data/GroundTruth/"+self.parameters['signal_code']
path = dirpath + "/"+self.parameters['signal_code']+"_" + self.ports[0].parameters['file_groupID']
if not os.path.exists(path): #if GT file doesn't exist, create an empty file
GT = pd.DataFrame(columns = ["StartTime","EndTime","LowFreq","HighFreq","StartFile","EndFile","label","Type","SignalCode"])
#import code
#code.interact(local=locals())
os.makedirs(dirpath,exist_ok=True)
GT.to_csv(path,index=False)
def hashfile(path):
buff_size = 65536
sha1 = hashlib.sha1()
with open(path, 'rb') as f:
while True:
data = f.read(buff_size)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
#####
####custom function to find the right decimation level from format FG. Usually just the port, but if it is looped, it won't be..
def find_decimation_level(obj,portnum):
if obj.ports[portnum].__class__.__name__ == "CombineExtLoop":
return obj.ports[portnum].ports[0].parameters['target_samp_rate']
else:
return obj.ports[portnum].parameters['target_samp_rate']
class SplitED(Split_process,INSTINCT_process):
outfile = 'FileGroupFormat.csv.gz'
def run(self):
inFile = self.ports[0].outfilegen()
FG_dict = file_peek(inFile,fn_type = object,fp_type = object,st_type = object,\
dur_type = 'float64',comp_type = 'gzip')
FG = pd.read_csv(inFile, dtype=FG_dict,compression='gzip')
if self.splits == 1:
FG.to_csv(self.outfilegen(),index=False,compression='gzip')
else:
row_counts = len(FG['DiffTime'])
breaks = int(row_counts/self.splits)
blist = numpy.repeat(range(0,self.splits),breaks)
bdiff = row_counts - len(blist)
extra = numpy.repeat(self.splits-1,bdiff)
flist = list(blist.tolist() + extra.tolist())
FG.loc[[x==self.split_ID-1 for x in flist]].to_csv(self.outfilegen(),index=False,compression='gzip')
class RunED(Split_process,SplitRun_process,INSTINCT_process):
#need to define an outpath that is based on splitED... default goes off of last port.
outfile = 'DETx.csv.gz'
SplitInitial=SplitED
def run(self):
if 'verbose' in self.arguments:
if self.arguments['verbose']=='y':
verbose = 'y'
else:
verbose = 'n'
else:
verbose = 'n'
#import code
#code.interact(local=locals())
#param_names grandfathered in, should just have R parse dictionaries as a standard
self.cmd_args=[PARAMSET_GLOBALS['SF_foc'] + "/" + find_decimation_level(self,0),self.outpath(),self.outpath(),\
os.path.basename(self.input().path),'1',self.arguments['cpu'],self.arguments['file_chunk_size'],verbose,\
'method1',self.parameters['methodID'] + '-' + self.parameters['methodvers'],self.param_string,get_param_names(self.parameters)] #params
self.run_cmd()
#do this manually instead of using run_cmd to be compatible with prvs method
#rework ED wrapper to work with python dict before reworking run_cmd to work with wrapper
class EventDetector(Unify_process,INSTINCT_process):
outfile = 'DETx.csv.gz'
SplitRun = RunED
def run(self):
EDdict = {'StartTime': 'float64', 'EndTime': 'float64','LowFreq': 'float64', 'HighFreq': 'float64', 'StartFile': 'category','EndFile': 'category','ProcessTag': 'category'}
dataframes = [None] * int(self.arguments['splits'])
for k in range(int(self.arguments['splits'])):
dataframes[k] = pd.read_csv(self.outpath() +'/DETx' + str(k+1) + "_" + self.arguments['splits'] + '.csv.gz',dtype=EDdict)
ED = pd.concat(dataframes,ignore_index=True)
ED['ProcessTag2']=ED.ProcessTag.str.split('_', 1).map(lambda x: x[0])
#determin PT changes
statustest=[None]*len(ED['ProcessTag'])
for n in range(len(ED['StartTime'])-1):
statustest[n]=(ED['ProcessTag'][n]==ED['ProcessTag'][n+1])
#will need a catch in here for if this situation is not present
chED= ED.loc[[x==False for x in statustest]]
statustest2=[None]*len(chED['ProcessTag'])
for n in range(len(chED['StartTime'])-1):
statustest2[n]=(chED['ProcessTag2'].values[n]==chED['ProcessTag2'].values[n+1])
chED2= chED.loc[[x==True for x in statustest2]]
indecesED = chED2.index.append(chED2.index+1)
#import code
#code.interact(local=locals())
if indecesED.empty:
EDfin = ED[['StartTime','EndTime','LowFreq','HighFreq','StartFile','EndFile']]
ED.to_csv(self.outfilegen(),index=False,compression='gzip')
else:
EDfin = ED.loc[indecesED._values]
#reduce this to just file names to pass to Energy detector (FG style)
FG_cols = ['FileName','FullPath','StartTime','Duration','DiffTime','Deployment','SegStart','SegDur']
FG_dict = {'FileName': 'string','FullPath': 'category', 'StartTime': 'string','Duration': 'float','Deployment':'string','SegStart':'float','SegDur':'float','DiffTime':'int'}
#import code
#code.interact(local=locals())
FG = pd.read_csv(self.ports[0].outpath() +'/FileGroupFormat.csv.gz', dtype=FG_dict, usecols=FG_cols)
FG = FG[FG.DiffTime.isin(EDfin['ProcessTag2'].astype('int32'))&FG.FileName.isin(EDfin['StartFile'])] #subset based on both of these: if a long difftime, will only
#take the relevant start files, but will also go shorter than two files in the case of longer segments.
#recalculate difftime based on new files included. <- metacomment: not sure why we need to do this?
FG['StartTime'] = pd.to_datetime(FG['StartTime'], format='%Y-%m-%d %H:%M:%S')
FG = get_difftime(FG)
#save FG
FG.to_csv(self.outpath() + '/EDoutCorrect.csv.gz',index=False,compression='gzip')
if 'verbose' in self.arguments:
if self.arguments['verbose']=='y':
verbose = 'y'
else:
verbose = 'n'
else:
verbose = 'n'
#run second stage of EventDetector method
self.cmd_args=[PARAMSET_GLOBALS['SF_foc'] + "/" + find_decimation_level(self,0),self.outpath(),self.outpath(),\
'EDoutCorrect.csv.gz','2',self.arguments['cpu'],self.arguments['file_chunk_size'],verbose,\
'method1',self.parameters['methodID'] + '-' + self.parameters['methodvers'],self.param_string,get_param_names(self.parameters)]
self.process_ID = self.__class__.__name__ #needs to be specified here since it's a wrapper, otherwise assumed as class name
self.run_cmd()
ED = ED.drop(columns="ProcessTag")
ED = ED.drop(columns="ProcessTag2")
EDdict2 = {'StartTime': 'float64', 'EndTime': 'float64','LowFreq': 'float64', 'HighFreq': 'float64', 'StartFile': 'category','EndFile': 'category','DiffTime': 'int'}
#now load in result,
EDpatches = pd.read_csv(self.outpath()+'/DETx_int.csv.gz',dtype=EDdict2)
PatchList = [None] * len(EDpatches['DiffTime'].unique().tolist())
for n in range(len(EDpatches['DiffTime'].unique().tolist())):
nPatch = [EDpatches['DiffTime'].unique().tolist()[n]]
EDpatchN=EDpatches.loc[EDpatches['DiffTime'].isin(nPatch),]
FGpatch = FG[FG['DiffTime']==(n+1)]
FirstFile = EDpatchN.iloc[[0]]['StartFile'].astype('string').iloc[0]
LastFile = EDpatchN.iloc[[-1]]['StartFile'].astype('string').iloc[0]
BeginRangeStart= FGpatch.iloc[0]['SegStart']
BeginRangeEnd = BeginRangeStart+FGpatch.iloc[0]['SegDur']/2
LastRangeStart= FGpatch.iloc[-1]['SegStart']
LastRangeEnd = LastRangeStart+FGpatch.iloc[-1]['SegDur']/2
EDpatchN = EDpatchN[((EDpatchN['StartTime'] > BeginRangeEnd) & (EDpatchN['StartFile'] == FirstFile)) | (EDpatchN['StartFile'] != FirstFile)]
EDpatchN = EDpatchN[((EDpatchN['StartTime'] < LastRangeEnd) & (EDpatchN['StartFile'] == LastFile)) | (EDpatchN['StartFile'] != LastFile)]
EDpatchN=EDpatchN.drop(columns="DiffTime")
ED1 = ED.copy()[(ED['StartTime'] <= BeginRangeEnd) & (ED['StartFile'] == FirstFile)] #get all before patch
ED2 = ED.copy()[(ED['StartTime'] >= LastRangeEnd) & (ED['StartFile'] == LastFile)] #get all after patch
ED3 = ED.copy()[(ED['StartFile'] != FirstFile) & (ED['StartFile'] != LastFile)]
ED = pd.concat([ED1,ED2,ED3],ignore_index=True)
EDpNfiles = pd.Series(EDpatchN['StartFile'].append(EDpatchN['EndFile']).unique()) #switched to numpy array on an unknown condition, pd.Series forces it to stay this datatype. Needs testing
FandLfile = [FirstFile,LastFile]
internalFiles = EDpNfiles[EDpNfiles.isin(FandLfile)==False]
if len(internalFiles)>0:
#subset to remove internal files from patch from ED
ED = ED[(ED.StartFile.isin(internalFiles)==False)&(ED.EndFile.isin(internalFiles)==False)]
#here, subset all the detections within EDpatchN: find any sound files that are not start and end file, and remove them from ED
#hint: isin to find files in EDpN, and isin to subset ED.
#ED = ED[(ED.StartFile.isin(EDpatchN['StartFile'])==False)
#save ED patch
PatchList[n]=EDpatchN
EDpatchsub = pd.concat(PatchList,ignore_index=True)
#combine ED and EDpatch
ED = pd.concat([EDpatchsub,ED],ignore_index=True)
ED = ED.sort_values(['StartFile','StartTime'], ascending=[True,True])
os.remove(self.outpath() + '/DETx_int.csv.gz')
os.remove(self.outpath() + '/EDoutCorrect.csv.gz')
ED.to_csv(self.outfilegen(),index=False,compression='gzip')
class SplitFE(Split_process,INSTINCT_process):
outfile = 'DETx.csv.gz'
def run(self):
inFile = self.ports[0].outfilegen()
DETdict = {'StartTime': 'float64', 'EndTime': 'float64','LowFreq': 'float64', 'HighFreq': 'float64', 'StartFile': 'category','EndFile': 'category'}
DET = | pd.read_csv(inFile, dtype=DETdict,compression='gzip') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 12:44:52 2019
@author: Jarvis
AQ Map fuctions libary for compter project
"""
#All the imports
#pip install folium
#pip install vincent
#pip install mpld3
import folium
from folium import plugins
#needed to get plot in popup
import vincent
import json
import datetime
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import glob
#for random data generator
import random
import math
import codecs
#get AQ Fuctios
import sys
from folium.plugins import TimestampedGeoJson
#color bars
import branca.colormap as cm
#import folium
from folium import IFrame
import os
import mpld3
import csv
sys.path.append("..") #Import varaibles in run from AQ_Plot_server directory
#sys.path.append(sys.path[0][0:sys.path[0].find("AQ_run")]) #Import varaiblesif run from home directory
import variables as V #IMport the file names, you dont want to type them out
#set varables for all fuctions
#colors used for the color bar and the data plots
colors=["green","greenyellow","yellow","gold","orange","salmon","red","purple"]
#get data
def Walkdata(loc):
try:
#read the data
df=pd.read_csv(loc,header=4,error_bad_lines=False,usecols=[0,1,2,3,4,5,6,7])
#df=pd.DataFrame({'time':data['time'] 'PM2':data['pm2'],'PM10':data['pm10'],'PM1':data['pm2'], 'RH':data['RH'],'lat':data['lat'],'lon':data['lon']})
# print("Data check 1",df.head(3))
df.index=pd.to_datetime(df.index,yearfirst=True)
#for col in df.columns:
# df[col].iloc(df[df[col]=="None"].index,col)=np.nan()
df.set_index('time', inplace=True)
# print(df.index)
#print("Data check 2",df.head(3))
#read in info from csv
with open(loc) as f:
reader=csv.reader(f)
info={}
i=0
for row in reader:
i=i+1
if i<5:
# print("row",i)
# print(row)
rowinfo=rowinfo=list(filter(None,row[1:len(row)]))
print(rowinfo)
info[row[0]]=rowinfo
if i==4:
return df, info
except Exception as e:
print("Error in reading file ",loc," /n please check file")
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
print(type(e))
print(e.args)
# vectorized haversine function
def gendist(data):
"""
Genrate the distance between two point and add them as a new row called dist
"""
earth_radius=6371
diffs=[]
#reset indec, to get the number not time
print("gen dist")
# print(data["lat"].iloc[3])
for ind in range(0,len(data)):
# print(ind)
#if not first point and last point
try:
if ind != 0 and ind != len(data)-1:
# print(data["lat"][ind])
lat1=data["lat"].iloc[ind]
lat2=data["lat"].iloc[ind+1]
lon1=data["lon"].iloc[ind]
lon2=data["lon"].iloc[ind+1]
lat1, lon1, lat2, lon2 = np.radians([lat1, lon1, lat2, lon2])
dlat=lat2-lat1
dlon=lon2-lon1
a = np.sin((dlat)/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin((dlon)/2.0)**2
diff=earth_radius * 2 * np.arcsin(np.sqrt(a))*1000
else:
diff="nan"
diffs.append(diff)
except IndexError:
diff="nan"
diffs.append(diff)
except Exception as e:
print("Error in GPS distance, check columns names")
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
print(type(e))
print(e.args)
pass
data.insert(len(data.columns),"dist",diffs,True)
return data
def Staticsitedate(df,val,location,m,locname):
"""
Plot static data, take in the data df,
located the data in Geolocation from the file name and plot it
"""
#get date from name in the GPS data
# print(df.head(5))
#plot the data as popup
Goefol="Data//GeoLocations.csv"
# Geolocs=pd.read_csv(Goefol,header=0,encoding = 'unicode_escape',error_bad_lines=False)
with codecs.open(Goefol, "br",encoding="utf8", errors='ignore') as file:
Geolocs = pd.read_table(file, delimiter=",")
#Geolocs.set_index("Site")
# print(Geolocs)
#find the site information in the Geolocsation
for i,ind in enumerate(Geolocs["Site"]):
if ind==location:
try:
Lat=Geolocs["Lat"][i]
lon=Geolocs["lon"][i]
info={}
info["Site"]=Geolocs["Site"][i]
info["Sensor"]=Geolocs["Sensor"][i]
info["start date"]=Geolocs["start date"][i]
info["end date"]=Geolocs["end date"][i]
info["Link"]=Geolocs["Link"][i]
print("siteinfo",Lat,lon)
except:
print("Site infromation error")
pass
try:
popup=plotdataPopInfo(df,val,info,locname)
except:
print("Popup error")
popup=info["Site"]
folium.Marker(location=[Lat,lon],
popup=popup,
#get icon and color based of mean value , of the first value
icon=folium.Icon(color= genfill_color(df[val[0]].mean(),100))
).add_to(m)
print("Markger Generated")
def Stationrydata(data):
"""
Fuction: Run through GPS data files, find stationry data cuts its of the old data.
Then it run though the Stationry data to find if there are multiple statiory spots,
if so it splits the stationory data into dic elements.
"""
print("-------------------------")
print("plot Stationydata")
print("-------------------------")
#get dist data
data=gendist(data) #get the date
data=data[~data.duplicated(keep='first')]
#print("Distance", data["dist"])
Statdata=pd.DataFrame(index=data.index,columns=data.columns) #create a stationy data frame arraynumber
Statdata["StatGroup"]=0 #StaticDataGroups
save=pd.DataFrame()
# Nanlatlon=pd.DataFrame(index=data.index,columns=data.columns) #create a GPS error dataframe array
#pDataFrame(columns=data.columns)
a=0 #set an index for the save
#loop through the data
SG=0 #StatGroups
try:
for index,row in data.iterrows():
diff=row["dist"]
# print(diff)
if diff != "nan":
if a==0:
save=pd.DataFrame()
# rec="on"
if diff<5:
#dict1.update()
a=a+1
save[index]=row.T
if a>5: #if 5 stationry points in row
# print(Statdata.loc[index])
# print(row)
if a==6:
save=save.T
save.index.name="time"
#print(save)
Statdata.loc[save.index]=save
# print(Statdata.loc[save.index])
Statdata["StatGroup"]=SG
else:
row["StatGroup"]=SG
# if (Statdata.loc[index])>1:
Statdata.loc[index]=row.T
else:
SG+=1
a=0 #reset index
print("Stationary Data")
print(Statdata.head(4))
Newdata=Statdata[Statdata['dist'].notnull()]
#cut the data from old array
data=data[Statdata['dist'].isnull()]
# checks
# print("OldD",data.head(4))
# print("NewD",Newdata.head(4))
#of there is some still data
#chaeck if Still data is close to one another in time
#If it is split the data into diffrenct section
Stilldic={} #dic to add the splits
Satname="Sat" #place holdername
Satgroups=Newdata.groupby("StatGroup")
for group in Satgroups:
print(group)
print(Satname+"_"+str(group[0]))
Stilldic[Satname+"_"+str(group[0])]=group[1]
"""
satnum=1 #set number to add to satname for diffrent station data sets
saveindex=0 #set a index to deal with multiple silld
save=Newdata.index[0] #place holder to get the function working
for index,row in Newdata.iterrows():
if index != 0:
timediff=divmod((index-save).total_seconds(),60)
if timediff[1] >120: #if greater than 2 mins split
#add to still data to dic
Stilldic[Satname+str(satnum)]=pd.DataFrame(Newdata.ilox[saveindex:index])
satnum=satnum+1 #add to the satnum
#save the index for next interval
save=index
if len(Stilldic) <1: # if there only 1 sill data, then add it to the dict. This is to make future code what loop through a dic easyer
Stilldic[Satname]=Newdata
"""
print("Got GPS Stationy data")
print("Still Dic")
# print(Newdata.head(3))
except Exception as e:
print("No stationry GPS data ")
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
print(type(e))
print(e.args)
Stilldic="ERROR"
pass
return Stilldic
#color map
def colormap(m,index,caption):
"""
Def a color map, need the map m, an array of colors with a matching index and caption for the name.
"""
CB= cm.StepColormap(colors, vmin=0,vmax=50,index=index, caption=caption )
m.add_child(CB)
def genfill_color(val,ref):
"""
Generate colors for to fill cirles based on data values
need a color lits and index defined before the use
"""
val=val/ref
col=""
try:
if val <= 0.05:
col=colors[0]
elif (val >= 0.05 and val <0.1):
col=colors[1]
elif (val >0.1 and val <0.15):
col=colors[2]
elif (val >0.15 and val <0.20):
col=colors[3]
elif (val >= 0.20 and val <0.25):
col=colors[4]
elif (val >= 0.25 and val <0.3):
col=colors[5]
elif(val >=0.3 and val <0.4 ):
col=colors[6]
elif val>0.4:
col=colors[7]
return col
except Exception as e:
print("Error in genfill_color")
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
print(type(e))
print(e.args)
pass
def GenPMCircles(data,val,group,outlinecolor):
"""
Generate walk map data as circles data, need data, the varaibles and the map group i.e for diffrent dates
And an outlinecolor for
"""
data=data.dropna()
try:
for index,row in data.iterrows():
ref=100
folium.Circle(location=[row['lat'],row['lon']],
radius=8,
popup=("Time <br>"+str(index)+" "+val+"+:"+str(round(row[val],2))+"ug/m^2"+" Temp:"+str(round(row['DHT-T'],2))+"C"+" RH:"+str(round(row['DHT-RH'],1))+"(%)"),
fill_color=genfill_color(row[val],ref),
color=genfill_color(row[val],ref),
fill_opacity=0.8,
opacity=0.9,
).add_to(group)
except Exception as e:
print("-----------Error in GPS Data cirlce generation----------")
print(val)
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
print(type(e))
print(e.args)
pass
def DataCircle(df,lat, lon,group,val):
"""
Data circle fuction but this time iver with a data plot of mean data
"""
#generate data cirles with plots of the data or mean depending on length
#print(df)
try:
if len(df)>1 and len(df)<10: #just plot mean data, too small for a time series
popup="Mean <br> Time <br>"+str(min(df.index))+"_"+str(max(df.index))+"PM2.5:"+str(round(df[val[0]],2))+"ug/m^2"+" PM10:"+str(round(df[val[1]],2))+"ug/m^2" +" Temp:"+str(round(df['DHT-T'],2))+"C"+" RH:"+str(round(df['DHT-RH'],1))+"(%)"
if "BinCount" in df.colomns:
ref=1000
popup=popup+"Partical Count:"+str(round(df['BinCount'],2))
fill_color=genfill_color(df[val[0]],ref)
else:
fill_color=genfill_color(df[val[0]],100)
df=df.mean()
folium.Circle(location=[df['lat'],df['lon']],
radius=8,
fill_color=fill_color,
color="grey",
fill_opacity=0.8,
opacity=0.9,
).add_to(group)
else:
#if more than 10 data points, plot the time series of the data
folium.Circle(location=[lat,lon],
popup=plotdataPop(df,val),
radius=10, fill=True,color='black'
).add_to(group)
except Exception as e:
print("Error in GPS Data cirlce generation")
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
print(type(e))
print(e.args)
pass
def DataMarker(df,val,lat, lon,group):
folium.Marker(location=[lat,lon],
popup=plotdataPop(df,val),
icon=folium.Icon(color= genfill_color(df[val[0]].mean(),100))
).add_to(group)
def DataMarkerInfo(df,val,lat, lon,group,info,locname):
#2019 data makrder #
folium.Marker(location=[lat,lon],
popup=plotdataPopInfo(df,[val],info,locname),
icon=folium.Icon(color= genfill_color(df[val].mean(),100))
).add_to(group)
def plotdataPopInfo(df,vals,info,locname):
"""
Popup data ploter with information columns
Takes in the data desired to plot,
the varaibles and the infomration about the location and sensor for the popup
returned the popup
"""
print("-------------------------")
print("plot Popup")
print("-------------------------")
#def data beased on values
figname=""
df.index=pd.to_datetime(df.index)
try:
#df=df.copy()
width=600#len(df.index)
if width <500: #if there not much data, still make is big enoguh
width=400
#if more values are wanted to be plotted, add fadding
alpha=1
if len(vals) >1:
alpha=0.8
fig, ax = plt.subplots(figsize=(8,4))
ax = df[vals].plot(ax=ax, legend=True, alpha=alpha)
ax.set_ylabel('Mass concentration (uu g/m^3)')
ax.set_xlabel('')
# ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=60*3)) #to get a tick every 6 hours
#ax.format_xdata = plt.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) #optional formatting
# ax.set_title(info["Site"])
ax.grid()
#create a htlmfile for the plot
date= | pd.to_datetime(df.index[0]) | pandas.to_datetime |
import numpy as np
import pandas as pd
from shapely.geometry import Point, Polygon, GeometryCollection
import geopandas
from geopandas import GeoDataFrame, GeoSeries, base, read_file, sjoin
from pandas.util.testing import assert_frame_equal
import pytest
@pytest.fixture()
def dfs(request):
polys1 = GeoSeries(
[
Polygon([(0, 0), (5, 0), (5, 5), (0, 5)]),
Polygon([(5, 5), (6, 5), (6, 6), (5, 6)]),
Polygon([(6, 0), (9, 0), (9, 3), (6, 3)]),
]
)
polys2 = GeoSeries(
[
Polygon([(1, 1), (4, 1), (4, 4), (1, 4)]),
Polygon([(4, 4), (7, 4), (7, 7), (4, 7)]),
Polygon([(7, 7), (10, 7), (10, 10), (7, 10)]),
]
)
df1 = GeoDataFrame({"geometry": polys1, "df1": [0, 1, 2]})
df2 = GeoDataFrame({"geometry": polys2, "df2": [3, 4, 5]})
if request.param == "string-index":
df1.index = ["a", "b", "c"]
df2.index = ["d", "e", "f"]
if request.param == "named-index":
df1.index.name = "df1_ix"
df2.index.name = "df2_ix"
if request.param == "multi-index":
i1 = ["a", "b", "c"]
i2 = ["d", "e", "f"]
df1 = df1.set_index([i1, i2])
df2 = df2.set_index([i2, i1])
if request.param == "named-multi-index":
i1 = ["a", "b", "c"]
i2 = ["d", "e", "f"]
df1 = df1.set_index([i1, i2])
df2 = df2.set_index([i2, i1])
df1.index.names = ["df1_ix1", "df1_ix2"]
df2.index.names = ["df2_ix1", "df2_ix2"]
# construction expected frames
expected = {}
part1 = df1.copy().reset_index().rename(columns={"index": "index_left"})
part2 = (
df2.copy()
.iloc[[0, 1, 1, 2]]
.reset_index()
.rename(columns={"index": "index_right"})
)
part1["_merge"] = [0, 1, 2]
part2["_merge"] = [0, 0, 1, 3]
exp = pd.merge(part1, part2, on="_merge", how="outer")
expected["intersects"] = exp.drop("_merge", axis=1).copy()
part1 = df1.copy().reset_index().rename(columns={"index": "index_left"})
part2 = df2.copy().reset_index().rename(columns={"index": "index_right"})
part1["_merge"] = [0, 1, 2]
part2["_merge"] = [0, 3, 3]
exp = pd.merge(part1, part2, on="_merge", how="outer")
expected["contains"] = exp.drop("_merge", axis=1).copy()
part1["_merge"] = [0, 1, 2]
part2["_merge"] = [3, 1, 3]
exp = pd.merge(part1, part2, on="_merge", how="outer")
expected["within"] = exp.drop("_merge", axis=1).copy()
return [request.param, df1, df2, expected]
@pytest.mark.skipif(not base.HAS_SINDEX, reason="Rtree absent, skipping")
class TestSpatialJoin:
@pytest.mark.parametrize("dfs", ["default-index", "string-index"], indirect=True)
def test_crs_mismatch(self, dfs):
index, df1, df2, expected = dfs
df1.crs = {"init": "epsg:4326", "no_defs": True}
with pytest.warns(UserWarning):
sjoin(df1, df2)
@pytest.mark.parametrize(
"dfs",
[
"default-index",
"string-index",
"named-index",
"multi-index",
"named-multi-index",
],
indirect=True,
)
@pytest.mark.parametrize("op", ["intersects", "contains", "within"])
def test_inner(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how="inner", op=op)
exp = expected[op].dropna().copy()
exp = exp.drop("geometry_y", axis=1).rename(columns={"geometry_x": "geometry"})
exp[["df1", "df2"]] = exp[["df1", "df2"]].astype("int64")
if index == "default-index":
exp[["index_left", "index_right"]] = exp[
["index_left", "index_right"]
].astype("int64")
if index == "named-index":
exp[["df1_ix", "df2_ix"]] = exp[["df1_ix", "df2_ix"]].astype("int64")
exp = exp.set_index("df1_ix").rename(columns={"df2_ix": "index_right"})
if index in ["default-index", "string-index"]:
exp = exp.set_index("index_left")
exp.index.name = None
if index == "multi-index":
exp = exp.set_index(["level_0_x", "level_1_x"]).rename(
columns={"level_0_y": "index_right0", "level_1_y": "index_right1"}
)
exp.index.names = df1.index.names
if index == "named-multi-index":
exp = exp.set_index(["df1_ix1", "df1_ix2"]).rename(
columns={"df2_ix1": "index_right0", "df2_ix2": "index_right1"}
)
exp.index.names = df1.index.names
assert_frame_equal(res, exp)
@pytest.mark.parametrize(
"dfs",
[
"default-index",
"string-index",
"named-index",
"multi-index",
"named-multi-index",
],
indirect=True,
)
@pytest.mark.parametrize("op", ["intersects", "contains", "within"])
def test_left(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how="left", op=op)
if index in ["default-index", "string-index"]:
exp = expected[op].dropna(subset=["index_left"]).copy()
elif index == "named-index":
exp = expected[op].dropna(subset=["df1_ix"]).copy()
elif index == "multi-index":
exp = expected[op].dropna(subset=["level_0_x"]).copy()
elif index == "named-multi-index":
exp = expected[op].dropna(subset=["df1_ix1"]).copy()
exp = exp.drop("geometry_y", axis=1).rename(columns={"geometry_x": "geometry"})
exp["df1"] = exp["df1"].astype("int64")
if index == "default-index":
exp["index_left"] = exp["index_left"].astype("int64")
# TODO: in result the dtype is object
res["index_right"] = res["index_right"].astype(float)
elif index == "named-index":
exp[["df1_ix"]] = exp[["df1_ix"]].astype("int64")
exp = exp.set_index("df1_ix").rename(columns={"df2_ix": "index_right"})
if index in ["default-index", "string-index"]:
exp = exp.set_index("index_left")
exp.index.name = None
if index == "multi-index":
exp = exp.set_index(["level_0_x", "level_1_x"]).rename(
columns={"level_0_y": "index_right0", "level_1_y": "index_right1"}
)
exp.index.names = df1.index.names
if index == "named-multi-index":
exp = exp.set_index(["df1_ix1", "df1_ix2"]).rename(
columns={"df2_ix1": "index_right0", "df2_ix2": "index_right1"}
)
exp.index.names = df1.index.names
assert_frame_equal(res, exp)
def test_empty_join(self):
# Check empty joins
polygons = geopandas.GeoDataFrame(
{
"col2": [1, 2],
"geometry": [
Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
],
}
)
not_in = geopandas.GeoDataFrame({"col1": [1], "geometry": [Point(-0.5, 0.5)]})
empty = sjoin(not_in, polygons, how="left", op="intersects")
assert empty.index_right.isnull().all()
empty = sjoin(not_in, polygons, how="right", op="intersects")
assert empty.index_left.isnull().all()
empty = sjoin(not_in, polygons, how="inner", op="intersects")
assert empty.empty
@pytest.mark.parametrize("dfs", ["default-index", "string-index"], indirect=True)
def test_sjoin_invalid_args(self, dfs):
index, df1, df2, expected = dfs
with pytest.raises(ValueError, match="'left_df' should be GeoDataFrame"):
sjoin(df1.geometry, df2)
with pytest.raises(ValueError, match="'right_df' should be GeoDataFrame"):
sjoin(df1, df2.geometry)
@pytest.mark.parametrize(
"dfs",
[
"default-index",
"string-index",
"named-index",
"multi-index",
"named-multi-index",
],
indirect=True,
)
@pytest.mark.parametrize("op", ["intersects", "contains", "within"])
def test_right(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how="right", op=op)
if index in ["default-index", "string-index"]:
exp = expected[op].dropna(subset=["index_right"]).copy()
elif index == "named-index":
exp = expected[op].dropna(subset=["df2_ix"]).copy()
elif index == "multi-index":
exp = expected[op].dropna(subset=["level_0_y"]).copy()
elif index == "named-multi-index":
exp = expected[op].dropna(subset=["df2_ix1"]).copy()
exp = exp.drop("geometry_x", axis=1).rename(columns={"geometry_y": "geometry"})
exp["df2"] = exp["df2"].astype("int64")
if index == "default-index":
exp["index_right"] = exp["index_right"].astype("int64")
res["index_left"] = res["index_left"].astype(float)
elif index == "named-index":
exp[["df2_ix"]] = exp[["df2_ix"]].astype("int64")
exp = exp.set_index("df2_ix").rename(columns={"df1_ix": "index_left"})
if index in ["default-index", "string-index"]:
exp = exp.set_index("index_right")
exp = exp.reindex(columns=res.columns)
exp.index.name = None
if index == "multi-index":
exp = exp.set_index(["level_0_y", "level_1_y"]).rename(
columns={"level_0_x": "index_left0", "level_1_x": "index_left1"}
)
exp.index.names = df2.index.names
if index == "named-multi-index":
exp = exp.set_index(["df2_ix1", "df2_ix2"]).rename(
columns={"df1_ix1": "index_left0", "df1_ix2": "index_left1"}
)
exp.index.names = df2.index.names
assert_frame_equal(res, exp, check_index_type=False)
@pytest.mark.skipif(not base.HAS_SINDEX, reason="Rtree absent, skipping")
class TestSpatialJoinNYBB:
def setup_method(self):
nybb_filename = geopandas.datasets.get_path("nybb")
self.polydf = read_file(nybb_filename)
self.crs = self.polydf.crs
N = 20
b = [int(x) for x in self.polydf.total_bounds]
self.pointdf = GeoDataFrame(
[
{"geometry": Point(x, y), "pointattr1": x + y, "pointattr2": x - y}
for x, y in zip(
range(b[0], b[2], int((b[2] - b[0]) / N)),
range(b[1], b[3], int((b[3] - b[1]) / N)),
)
],
crs=self.crs,
)
def test_geometry_name(self):
# test sjoin is working with other geometry name
polydf_original_geom_name = self.polydf.geometry.name
self.polydf = self.polydf.rename(columns={"geometry": "new_geom"}).set_geometry(
"new_geom"
)
assert polydf_original_geom_name != self.polydf.geometry.name
res = sjoin(self.polydf, self.pointdf, how="left")
assert self.polydf.geometry.name == res.geometry.name
def test_sjoin_left(self):
df = sjoin(self.pointdf, self.polydf, how="left")
assert df.shape == (21, 8)
for i, row in df.iterrows():
assert row.geometry.type == "Point"
assert "pointattr1" in df.columns
assert "BoroCode" in df.columns
def test_sjoin_right(self):
# the inverse of left
df = sjoin(self.pointdf, self.polydf, how="right")
df2 = sjoin(self.polydf, self.pointdf, how="left")
assert df.shape == (12, 8)
assert df.shape == df2.shape
for i, row in df.iterrows():
assert row.geometry.type == "MultiPolygon"
for i, row in df2.iterrows():
assert row.geometry.type == "MultiPolygon"
def test_sjoin_inner(self):
df = sjoin(self.pointdf, self.polydf, how="inner")
assert df.shape == (11, 8)
def test_sjoin_op(self):
# points within polygons
df = sjoin(self.pointdf, self.polydf, how="left", op="within")
assert df.shape == (21, 8)
assert df.loc[1]["BoroName"] == "Staten Island"
# points contain polygons? never happens so we should have nulls
df = sjoin(self.pointdf, self.polydf, how="left", op="contains")
assert df.shape == (21, 8)
assert np.isnan(df.loc[1]["Shape_Area"])
def test_sjoin_bad_op(self):
# AttributeError: 'Point' object has no attribute 'spandex'
with pytest.raises(ValueError):
sjoin(self.pointdf, self.polydf, how="left", op="spandex")
def test_sjoin_duplicate_column_name(self):
pointdf2 = self.pointdf.rename(columns={"pointattr1": "Shape_Area"})
df = sjoin(pointdf2, self.polydf, how="left")
assert "Shape_Area_left" in df.columns
assert "Shape_Area_right" in df.columns
@pytest.mark.parametrize("how", ["left", "right", "inner"])
def test_sjoin_named_index(self, how):
# original index names should be unchanged
pointdf2 = self.pointdf.copy()
pointdf2.index.name = "pointid"
polydf = self.polydf.copy()
polydf.index.name = "polyid"
res = sjoin(pointdf2, polydf, how=how)
assert pointdf2.index.name == "pointid"
assert polydf.index.name == "polyid"
# original index name should pass through to result
if how == "right":
assert res.index.name == "polyid"
else: # how == "left", how == "inner"
assert res.index.name == "pointid"
def test_sjoin_values(self):
# GH190
self.polydf.index = [1, 3, 4, 5, 6]
df = sjoin(self.pointdf, self.polydf, how="left")
assert df.shape == (21, 8)
df = sjoin(self.polydf, self.pointdf, how="left")
assert df.shape == (12, 8)
@pytest.mark.xfail
def test_no_overlapping_geometry(self):
# Note: these tests are for correctly returning GeoDataFrame
# when result of the join is empty
df_inner = sjoin(self.pointdf.iloc[17:], self.polydf, how="inner")
df_left = sjoin(self.pointdf.iloc[17:], self.polydf, how="left")
df_right = sjoin(self.pointdf.iloc[17:], self.polydf, how="right")
expected_inner_df = pd.concat(
[
self.pointdf.iloc[:0],
| pd.Series(name="index_right", dtype="int64") | pandas.Series |
import argparse
import os
import logging
from netCDF4 import Dataset
import numpy as np
import pandas as pd
def nc2csv_obs_and_M(src_file_path, dst_dir):
with Dataset(src_file_path) as nc:
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
stations = nc.variables['station'][:]
date = nc.variables['date'][:]
id_list = []
for i in range(len(date)):
for j in range(37):
id = str(date[i])[:8] + '_' + '{:02d}'.format(j)
id_list.append(id)
ID = | pd.Series(data=id_list, name='Time') | pandas.Series |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import nibabel as nib
from scipy.stats import zscore, gaussian_kde
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from matplotlib.cm import ScalarMappable
from matplotlib.colors import Normalize
# Matoplotlib parameters for saving vector figures properly
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['svg.fonttype'] = 'none'
__all__ = ["pearsonr_2d", "get_axis_coords", "Dataset"]
# Small value added to some denominators to avoid zero division
EPSILON = 1e-9
def pearsonr_2d(A, B):
"""Calculates row-wise Pearson's correlation between 2 2d-arrays
Parameters
----------
A : 2d-array
shape N x T
B : 2d-array
shape M x T
Returns
-------
R : 2d-array
N x M shaped correlation matrix between all row combinations of A and B
"""
# Check if the 2 input arrays are 2d and have the same column number T
if (A.ndim != 2) or (B.ndim) != 2:
raise ValueError('A and B must be 2d numpy arrays.')
if A.shape[1] != B.shape[1]:
raise ValueError('A and B arrays must have the same shape.')
# Subtract row-wise mean from input arrays
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
# Sum of squares across rows
ssA = (A_mA ** 2).sum(1)
ssB = (B_mB ** 2).sum(1)
# Finally get and return correlation coefficient
numerator = np.dot(A_mA, B_mB.T)
denominator = np.sqrt(np.dot(ssA[:, None], ssB[None])) + EPSILON
return numerator / denominator
def get_axis_coords(fig, ax):
"""Gets various coordinates of an axis
within the figure space.
Parameters
----------
fig : matplotlib figure object
ax : matplotlib axis object
Returns
-------
coords : dictionary
Contains the various coordinates:
xmin, xmax, ymin, ymax, W (width), H (height),
xcen (x center), ycen (ycenter)
"""
try:
size_x, size_y = fig.get_size_inches() * fig.dpi
except ValueError:
print('fig must be a matplotlib figure object')
try:
box = ax.bbox
except ValueError:
print('ax must be a matplotlib axis object')
xmin, xmax = box.xmin, box.xmax
ymin, ymax = box.ymin, box.ymax
x0 = xmin / size_x
x1 = xmax / size_x
y0 = ymin / size_y
y1 = ymax / size_y
width = x1 - x0
height = y1 - y0
xc = x0 + width / 2
yc = y0 + height / 2
coords = {'xmin': x0, 'xmax': x1,
'ymin': y0, 'ymax': y1,
'W': width, 'H': height,
'xcen': xc, 'ycen': yc}
return coords
class Dataset(object):
"""Class for generating carpet plot from fMRI data and fitting PCA to it"""
def __init__(self, fmri_file, mask_file, output_dir):
""" Initialize a Dataset object and import data.
Parameters
----------
fmri_file : str
Path to 4d (3d + time) functional MRI data in NIFTI format.
mask_file : str
Path to 3d mask in NIFTI format (e.g. cortical mask).
Must have same coordinate space and data matrix as :fmri:
output_dir : str
Path to folder where results will be saved.
If it doesn't exist, it's created.
"""
self.fmri_file = fmri_file
self.mask_file = mask_file
# Create output directory if it doesn't exist
if not os.path.isdir(output_dir):
try:
os.mkdir(output_dir)
except IOError:
print("Could not create 'output_dir'")
self.output_dir = output_dir
print("\nInitialized Dataset object:")
print(f"\tfMRI file: {fmri_file}")
print(f"\tMask file: {mask_file}")
print(f"\tOutput directory: {output_dir}")
def import_data(self):
""" Loads fMRI and mask data using nibabel.
"""
print("Reading data...")
# Check if input files exist and try importing them with nibabel
if os.path.isfile(self.fmri_file):
try:
fmri_nifti = nib.load(self.fmri_file)
except IOError:
print(f"Could not load {self.fmri_file} using nibabel.")
print("Make sure it's a valid NIFTI file.")
else:
print(f"Could not find {self.fmri_file} file.")
if os.path.isfile(self.mask_file):
try:
mask_nifti = nib.load(self.mask_file)
except IOError:
print(f"Could not load {self.mask_file} using nibabel.")
print("Make sure it's a valid NIFTI file.")
else:
print(f"Could not find {self.mask_file} file.")
# Ensure that data dimensions are correct
data = fmri_nifti.get_fdata()
mask = mask_nifti.get_fdata()
print(f"\tfMRI data read: dimensions {data.shape}")
print(f"\tMask read: dimensions {mask.shape}")
if len(data.shape) != 4:
raise ValueError('fMRI must be 4-dimensional!')
if len(mask.shape) != 3:
raise ValueError('Mask must be 3-dimensional!')
if data.shape[:3] != mask.shape:
raise ValueError('fMRI and mask must be in the same space!')
# read data dimensions, header, and affine
self.x, self.y, self.z, self.t = data.shape
self.header = fmri_nifti.header
self.affine = fmri_nifti.affine
# store data and mask variables as object attributes
self.data = data
self.mask = mask
return
def get_carpet(self, tSNR_thresh=15.0,
reorder_carpet=True, save_carpet=False):
""" Makes a carpet matrix from fMRI data.
A carpet is a 2d matrix shaped voxels x time which contains
the normalized (z-score) BOLD-fMRI signal from within a mask
Parameters
----------
tSNR_thresh : float or None
Voxels with tSNR values below this threshold will be excluded.
To deactivate set to None.
Default: 15.0
reorder_carpet : boolean
Whether to reorder carpet voxels according to their (decreasing)
correlation with the global (mean across voxels) signal
Default: True
save_carpet : boolean
Whether to save the carpet matrix in the output directory.
The file might be large (possibly > 100MB depending on
fMRI data and mask size).
Default: False
"""
# compute fMRI data mean, std, and tSNR across time
data_mean = self.data.mean(axis=-1, keepdims=True)
data_std = self.data.std(axis=-1, keepdims=True)
data_tsnr = data_mean / (data_std + EPSILON)
# Mask fMRI data array with 'mask'
# Also mask voxels below tSNR threshold (if given)
mask = self.mask < 0.5
mask_4d = np.repeat(mask[:, :, :, np.newaxis], self.t, axis=3)
tsnr_mask_4d = np.zeros(mask_4d.shape, dtype=bool)
if tSNR_thresh is not None:
tsnr_mask = data_tsnr.squeeze() < tSNR_thresh
tsnr_mask_4d = np.repeat(tsnr_mask[:, :, :, np.newaxis],
self.t, axis=3)
data_masked = np.ma.masked_where(mask_4d | tsnr_mask_4d, self.data)
# Reshape data in 2-d (voxels x time)
data_2d = data_masked.reshape((-1, self.t))
print(f"fMRI data reshaped to voxels x time {data_2d.shape}.")
# Get indices for non-masked rows (voxels)
indices_valid = np.where(np.any(~np.ma.getmask(data_2d), axis=1))[0]
print(f"{len(indices_valid)} voxels retained after masking.")
# Keep only valid rows in carpet matrix
carpet = data_2d[indices_valid, :].data
print(f"Carpet matrix created with shape {carpet.shape}.")
# Normalize carpet (z-score)
carpet = zscore(carpet, axis=1)
print("Carpet normalized to zero-mean unit-variance.")
# Re-order carpet plot based on correlation with the global signal
if reorder_carpet:
gs = np.mean(carpet, axis=0)
gs_corr = pearsonr_2d(carpet, gs.reshape((1, self.t))).flatten()
sort_index = [int(i) for i in np.flip(np.argsort(gs_corr))]
carpet = carpet[sort_index, :]
print('Carpet reordered.')
# Save carpet to npy file
if save_carpet:
np.save(os.path.join(self.output_dir, 'carpet.npy'), carpet)
print("Carpet saved as 'carpet.npy'.")
self.carpet = carpet
return
def fit_pca2carpet(self, save_pca_scores=False):
""" Fits PCA to carpet matrix and saves the principal
componens (PCs), the explained variance ratios,
and optionally the PCA scores (PCA-tranformed carpet)
Parameters
----------
save_pca_scores : boolean
Whether to save the PCA scores (transformed carpet)
in the output directory. The file might be large
(possibly > 100MB depending on fMRI data and mask size).
Default: False
"""
# Fit PCA
model = PCA(whiten=True)
pca_scores = model.fit_transform(self.carpet)
self.pca_comps = model.components_
self.expl_var = model.explained_variance_ratio_
# Save results to npy files
np.save(os.path.join(self.output_dir, 'PCs.npy'),
self.pca_comps)
np.save(os.path.join(self.output_dir,
'PCA_expl_var.npy'),
self.expl_var)
if save_pca_scores:
np.save(os.path.join(self.output_dir, 'PCA_scores.npy'),
pca_scores)
print("PCA fit to carpet and results saved.")
return
def correlate_with_carpet(self, ncomp=5, flip_sign=True):
""" Correlates the first ncomp principal components (PCs)
with all carpet voxel time-series. Saves the correlation matrix.
Parameters
----------
ncomp : int
Number of PCA components to retain. These first PCs (fPCs)
are correlated with all carpet voxels.
Default: 5
flip_sign : boolean
If True, an fPC (and its correlation values) will be sign-flipped
when the median of its original correlation with carpet voxels is
negative. This enforces the sign of the fPC to match the sign of
the BOLD signal activity for most voxels. The sign-flipped
fPCs are only used for downstream analysis and visualization
(the saved PCA components and scores retain the original sign).
Default: True
"""
# Assert that ncomp can be taken as integer
try:
self.ncomp = int(ncomp)
except ValueError:
print("'ncomp' must be an integer!")
# Pass first ncomp PCs (fPCs) to pandas dataframe and save as csv
comp_names = ['PC' + str(i + 1) for i in range(self.ncomp)]
fPCs = pd.DataFrame(data=self.pca_comps.T[:, :self.ncomp],
columns=comp_names)
fPCs.to_csv(os.path.join(self.output_dir, 'fPCs.csv'), index=False)
# Correlate fPCs with carpet matrix
fPC_carpet_R = pearsonr_2d(self.carpet, fPCs.values.T)
# Save correlation matrix (voxels x ncom) as npy
np.save(os.path.join(self.output_dir, 'fPCs_carpet_corr.npy'),
fPC_carpet_R)
print(f"First {ncomp} PCs correlated with carpet.")
# Construct table reporting various metrics for each fPC
report = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from plots import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
'''
Notice: This computer software was prepared by Battelle Memorial Institute, hereinafter the Contractor, under Contract
No. DE-AC05-76RL01830 with the Department of Energy (DOE). All rights in the computer software are reserved by DOE on
behalf of the United States Government and the Contractor as provided in the Contract. You are authorized to use this
computer software for Governmental purposes but it is not to be released or distributed to the public. NEITHER THE
GOVERNMENT NOR THE CONTRACTOR MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS
SOFTWARE. This notice including this sentence must appear on any copies of this computer software.
'''
'''
This class implements repo centric methods.
These metrics assume that the data is in the order id,created_at,type,actor.id,repo.id
'''
'''
This method returns the distributon for the diffusion delay.
Question #1
Inputs: DataFrame - Data
eventType - A list of events to filter data on
unit - Time unit for time differences, e.g. "s","d","h"
metadata_file - CSV file with repo creation times. Otherwise use first repo observation as proxy for creation time.
Output: A list (array) of deltas in days
'''
def getRepoDiffusionDelay(df,eventType=None,unit='h',metadata_file = '', plot=False, saveData=False):
if metadata_file != '':
repo_metadata = pd.read_csv(metadata_file)
repo_metadata = repo_metadata[['full_name_h','created_at']]
repo_metadata['created_at'] = pd.to_datetime(repo_metadata['created_at'])
#Standardize Time and Sort Dataframe
df.columns = ['time','event','user','repo']
#Checks for specific event type, uses both Fork and WatchEvent
if eventType is not None:
df = df[df.event.isin(eventType)]
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
if metadata_file != '':
df = df.merge(repo_metadata,left_on='repo',right_on='full_name_h',how='left')
df = df[['repo','created_at','time']].dropna()
df['delta'] = (df['time']-df['created_at']).apply(lambda x: int(x / np.timedelta64(1, unit)))
else:
#Find difference between event time and "creation time" of repo
#Creation time is first seen event
creation_day = df['time'].min()
df['delta'] = (df['time']-creation_day).apply(lambda x: int(x / np.timedelta64(1, unit)))
df = df.iloc[1:]
delta = df['delta'].values
if plot==False:
return delta
##############
## Plotting ##
##############
if eventType is not None:
eventList = []
for ele in eventType:
eventList.append(ele[:-5])
eventType = '/'.join(eventList)
else:
eventType = 'All'
unit_labels = {'s':'Seconds',
'h':'Hours',
'd':'Days'}
##To Save or not
if saveData != False:
plot_histogram(delta,unit_labels[unit] + ' Between '+eventType+' Event and Creation Event','Number of Events','Diffusion Delay',loc=saveData + '_histogram.png')
##plotting line graph
plot_line_graph(delta,'Event Number','Delta between '+eventType+' Event and Creation','Diffusion Delay',labels=eventType,loc=saveData + '_linegraph.png')
else:
print(plot_histogram(delta,unit_labels[unit] + ' Between '+eventType+' Event and Creation Event','Number of Events','Diffusion Delay',loc=saveData))
##plotting line graph
print(plot_line_graph(delta,'Event Number','Delta between '+eventType+' Event and Creation','Diffusion Delay',labels=eventType,loc=saveData))
return delta
'''
This method returns the growth of a repo over time.
Question #2
Input: df - Dataframe of all data for a repo
cumSum - This is a boolean that indicates if the dataframe should be cumuluative over time.
output - A dataframe that describes the repo growth. Indexed on time.
'''
def getRepoGrowth(df, cumSum=False, plot=False, saveData=False):
df.columns = ['time', 'event','user', 'repo']
df['id'] = df.index
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
df.set_index('time', inplace=True)
df['hour'] = df.index.hour
df['day'] = df.index.day
df['month'] = df.index.month
df['year'] = df.index.year
#get daily event counts
p = df[['year', 'month', 'day', 'id']].groupby(['year', 'month', 'day']).count()
p = pd.DataFrame(p).reset_index()
#get cumulative sum of daily event counts
if cumSum == True:
p['id'] = p.cumsum(axis=0)['id']
p.columns = ['year', 'month', 'day', 'value']
p['date'] = p.apply(lambda x: datetime.strptime("{0} {1} {2}".format(x['year'], x['month'], x['day']), "%Y %m %d"), axis=1)
p['date'] = pd.to_datetime(p['date'].dt.strftime('%Y-%m-%d'))
p = p.set_index(p['date'])
del p['year']
del p['month']
del p['day']
del p['date']
p = p.reset_index()
if plot== False:
return p
##############
## Plotting ##
##############
cumTitle = ''
if cumSum:
cumTitle = 'Cumulative Sum of '
if saveData != False:
plot_time_series(p,'Time','Total Number of Events', cumTitle + 'Events Over Time', loc=saveData+'_time_series_cumsum'+str(cumSum)+'.png')
#To mimic PNNL Graph, run with CumSum as False
plot_histogram(p['value'].values,'Events Per Day',cumTitle + 'Total Number of Days','Distribution Over Daily Event Counts', loc=saveData + 'histogram_cumsum' +str(cumSum)+'.png')
else:
print(plot_time_series(p,'Time','Total Number of Events',cumTitle + 'Events Over Time'))
#To mimic PNNL Graph, run with CumSum as False
print(plot_histogram(p['value'].values,'Events Per Day',cumTitle + 'Total Number of Days','Distribution Over Daily Event Counts'))
return p
'''
This method returns the the number of events on a repo before it "dies" (deleted or no activity)
Question #2
Input - Dataframe of a repo
Output - Number of events before death
'''
def getLifetimeDepth(df):
df.columns = ['time','event','user','repo']
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
return len(df)
'''
Time from creation to "death" of repo (deleted or no activity)
Question #2
Input - Dataframe of a repo
Output - Time from creation to "death" (default is days)
'''
def getLifetimeTime(df):
df.columns = ['time', 'event', 'user', 'repo']
df['time'] = | pd.to_datetime(df['time']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import random
from scipy.optimize import minimize
import networkx as nx
import math
from gurobipy import * # Liscence needed, free academic liscence available at https://www.gurobi.com/
# =====================================#
# Component Based Event Simulation
# Stochastic Power Network Failure Under Extreme Weather Events
# =====================================#
#
# Modeling Components:
#
# Weather Event Generation:
# Simulates maximum windspeed exerted on each transmission line of network
#
# Fragility Model:
# Simulates breakage for each transmission line of the network
# # Based on fragility of transmission towers reported in:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Power System Resilience to
# Extreme Weather: Fragility Modelling, Probabilistic Impact Assessment, and Adaptation
# Measures. IEEE Trans Power Syst 2017 32:1-1. doi:10.1109/TPWRS.2016.2641463.
# # and further available from:
# Bennett, <NAME>.; DeCarolis, <NAME>.; Clarens, <NAME>., 2020, "Model and data for
# "Extending energy system modelling to include extreme weather risks and application to hurricane
# events in Puerto Rico"", https://doi.org/10.18130/V3/QB0NPX, University of Virginia Dataverse, V1
#
# Stochastic Network Breakage:
# Simulates breakages of network links
#
# Power Flow Model
# Simulates power flow through any given network
#
# Social Vulnerability Model
# Estimates impact of network loss on social vulnerability based on network power loss
#
#
# Special thanks to <NAME> for contributions to this code
#=====================================#
# =====================================#
# INFER DISTANCES
# =====================================#
def distances(nodes_file ='NetworkNodes.csv', links_file = 'NetworkLinks.csv'):
"""This function takes csv files with data on power network links and nodes and
returns a list of the haversine distances between each pair of nodes in the network.
Used to infer length of transmission line (and subsequnt number of towers) when exact length
or number of towers is not known empirically.
Input: nodes_file: containing information about the network nodes
links_file: containing information about the network links
Output: lengths: a pandas series lising the length of each network link in miles."""
# load data
nodes = pd.read_csv(nodes_file)
links = | pd.read_csv(links_file) | pandas.read_csv |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples((Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103')),
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='p')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([ | Interval(0, 1) | pandas.Interval |
#!/usr/local/bin/python3
# <NAME> - <EMAIL>
# Create Markdown of Spotify Play History
from spotipy import Spotify
from spotipy import util
import pandas as pd
# Get authorization token for this user - resfreshes or asks for permission as needed
my_token = util.prompt_for_user_token(username="1238655357", # Michelle's ID
scope="user-read-recently-played", # allows us to see recently played songs
redirect_uri="http://127.0.0.1:12345") # URL in our app
# Object for interacting with spotify user
spotify = Spotify(auth=my_token)
# list of last 50 songs
last_songs_dict = spotify.current_user_recently_played()['items']
print("*** MARKDOWN LIST OF ARTISTS WITH LINKS ***")
# frame to hold artists
artists = | pd.DataFrame() | pandas.DataFrame |
"""
Code was adapted from https://github.com/Britefury/self-ensemble-visual-domain-adapt
"""
"""
Incorporates mean teacher, from:
Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results
<NAME>, <NAME>
https://arxiv.org/abs/1703.01780
"""
from bayes_opt import BayesianOptimization
import data_loaders
from sklearn.model_selection import StratifiedKFold
import time
import numpy as np
from batchup import data_source, work_pool
import network_architectures
import augmentation
import torch, torch.cuda
from torch import nn
from torch.nn import functional as F
import optim_weight_ema
from sklearn.preprocessing import label_binarize
from sklearn import metrics
from sklearn.metrics import confusion_matrix
import os
import pickle
import datetime
import pandas as pd
import cmdline_helpers
import argparse
INNER_K_FOLD = 3
OUTER_K_FOLD = 10
num_epochs = 100
bo_num_iter = 50
init_points = 5
PATIENCE = 5
BEST_EPOCHS_LIST = []
parser = argparse.ArgumentParser()
parser.add_argument('--exp', type=str, default='syndigits_svhn')
args = parser.parse_args()
exp = args.exp
log_file = f'results_exp_meanteacher_hila/log_{exp}_run.txt'
model_file = ''
seed = 0
device = 'cpu'
epoch_size = 'target'
batch_size = 30
torch_device = torch.device(device)
pool = work_pool.WorkerThreadPool(2)
# Setup output
def log(text):
print(text)
if log_file is not None:
with open(log_file, 'a') as f:
f.write(text + '\n')
f.flush()
f.close()
def ensure_containing_dir_exists(path):
dir_name = os.path.dirname(path)
if dir_name != '' and not os.path.exists(dir_name):
os.makedirs(dir_name)
return path
def load_data(exp):
settings = locals().copy()
if exp == 'svhn_mnist':
d_source = data_loaders.load_svhn(zero_centre=False, greyscale=True)
d_target = data_loaders.load_mnist(invert=False, zero_centre=False, pad32=True, val=False)
elif exp == 'mnist_svhn':
d_source = data_loaders.load_mnist(invert=False, zero_centre=False, pad32=True)
d_target = data_loaders.load_svhn(zero_centre=False, greyscale=True, val=False)
elif exp == 'svhn_mnist_rgb':
d_source = data_loaders.load_svhn(zero_centre=False, greyscale=False)
d_target = data_loaders.load_mnist(invert=False, zero_centre=False, pad32=True, val=False, rgb=True)
elif exp == 'mnist_svhn_rgb':
d_source = data_loaders.load_mnist(invert=False, zero_centre=False, pad32=True, rgb=True)
d_target = data_loaders.load_svhn(zero_centre=False, greyscale=False, val=False)
elif exp == 'cifar_stl':
d_source = data_loaders.load_cifar10(range_01=False)
d_target = data_loaders.load_stl(zero_centre=False, val=False)
elif exp == 'stl_cifar':
d_source = data_loaders.load_stl(zero_centre=False)
d_target = data_loaders.load_cifar10(range_01=False, val=False)
elif exp == 'mnist_usps':
d_source = data_loaders.load_mnist(zero_centre=False)
d_target = data_loaders.load_usps(zero_centre=False, scale28=True, val=False)
elif exp == 'usps_mnist':
d_source = data_loaders.load_usps(zero_centre=False, scale28=True)
d_target = data_loaders.load_mnist(zero_centre=False, val=False)
elif exp == 'syndigits_svhn':
d_source = data_loaders.load_syn_digits(zero_centre=False)
d_target = data_loaders.load_svhn(zero_centre=False, val=False)
elif exp == 'svhn_syndigits':
d_source = data_loaders.load_svhn(zero_centre=False, val=False)
d_target = data_loaders.load_syn_digits(zero_centre=False)
else:
print('Unknown experiment type \'{}\''.format(exp))
return
source_x = np.array(list(d_source.train_X[:]) + list(d_source.test_X[:]))
target_x = np.array(list(d_target.train_X[:]) + list(d_target.test_X[:]))
source_y = np.array(list(d_source.train_y[:]) + list(d_source.test_y[:]))
target_y = np.array(list(d_target.train_y[:]) + list(d_target.test_y[:]))
n_classes = d_source.n_classes
print('Loaded data')
return source_x, source_y, target_x, target_y, n_classes
def build_and_train_model(source_train_x_inner, source_train_y_inner, target_train_x_inner,
source_validation_x, source_validation_y, target_validation_x, target_validation_y,
confidence_thresh, teacher_alpha, unsup_weight, cls_balance, learning_rate, arch='', test_model=False,
loss='var', num_epochs=num_epochs, cls_bal_scale=False, cls_bal_scale_range=0.0, cls_balance_loss='bce',
src_affine_std=0.0, src_xlat_range=0.0, src_hflip=False, src_intens_flip=False,
src_gaussian_noise_std=0.0, tgt_affine_std=0.0, tgt_xlat_range=0.0, tgt_hflip=False,
tgt_intens_flip='', tgt_gaussian_noise_std=0.0):
net_class, expected_shape = network_architectures.get_net_and_shape_for_architecture(arch)
settings = locals().copy()
src_intens_scale_range_lower, src_intens_scale_range_upper, src_intens_offset_range_lower, src_intens_offset_range_upper = \
None, None, None, None
tgt_intens_scale_range_lower, tgt_intens_scale_range_upper, tgt_intens_offset_range_lower, tgt_intens_offset_range_upper = \
None, None, None, None
if expected_shape != source_train_x_inner.shape[1:]:
print('Architecture {} not compatible with experiment {}; it needs samples of shape {}, '
'data has samples of shape {}'.format(arch, exp, expected_shape, source_train_x_inner.shape[1:]))
return
student_net = net_class(n_classes).to(torch_device)
teacher_net = net_class(n_classes).to(torch_device)
student_params = list(student_net.parameters())
teacher_params = list(teacher_net.parameters())
for param in teacher_params:
param.requires_grad = False
student_optimizer = torch.optim.Adam(student_params, lr=learning_rate)
teacher_optimizer = optim_weight_ema.OldWeightEMA(teacher_net, student_net, alpha=teacher_alpha)
classification_criterion = nn.CrossEntropyLoss()
print('Built network')
src_aug = augmentation.ImageAugmentation(
src_hflip, src_xlat_range, src_affine_std,
intens_flip=src_intens_flip,
intens_scale_range_lower=src_intens_scale_range_lower, intens_scale_range_upper=src_intens_scale_range_upper,
intens_offset_range_lower=src_intens_offset_range_lower,
intens_offset_range_upper=src_intens_offset_range_upper,
gaussian_noise_std=src_gaussian_noise_std
)
tgt_aug = augmentation.ImageAugmentation(
tgt_hflip, tgt_xlat_range, tgt_affine_std,
intens_flip=tgt_intens_flip,
intens_scale_range_lower=tgt_intens_scale_range_lower, intens_scale_range_upper=tgt_intens_scale_range_upper,
intens_offset_range_lower=tgt_intens_offset_range_lower,
intens_offset_range_upper=tgt_intens_offset_range_upper,
gaussian_noise_std=tgt_gaussian_noise_std
)
def augment(X_src, y_src, X_tgt):
X_src = src_aug.augment(X_src)
X_tgt_stu, X_tgt_tea = tgt_aug.augment_pair(X_tgt)
return X_src, y_src, X_tgt_stu, X_tgt_tea
rampup_weight_in_list = [0]
cls_bal_fn = network_architectures.get_cls_bal_function(cls_balance_loss)
def compute_aug_loss(stu_out, tea_out):
# Augmentation loss
conf_tea = torch.max(tea_out, 1)[0]
unsup_mask = conf_mask = (conf_tea > confidence_thresh).float()
unsup_mask_count = conf_mask_count = conf_mask.sum()
if loss == 'bce':
aug_loss = network_architectures.robust_binary_crossentropy(stu_out, tea_out)
else:
d_aug_loss = stu_out - tea_out
aug_loss = d_aug_loss * d_aug_loss
# Class balance scaling
if cls_bal_scale:
n_samples = unsup_mask.sum()
avg_pred = n_samples / float(n_classes)
bal_scale = avg_pred / torch.clamp(tea_out.sum(dim=0), min=1.0)
if cls_bal_scale_range != 0.0:
bal_scale = torch.clamp(bal_scale, min=1.0 / cls_bal_scale_range, max=cls_bal_scale_range)
bal_scale = bal_scale.detach()
aug_loss = aug_loss * bal_scale[None, :]
aug_loss = aug_loss.mean(dim=1)
unsup_loss = (aug_loss * unsup_mask).mean()
# Class balance loss
if cls_balance > 0.0:
# Compute per-sample average predicated probability
# Average over samples to get average class prediction
avg_cls_prob = stu_out.mean(dim=0)
# Compute loss
equalise_cls_loss = cls_bal_fn(avg_cls_prob, float(1.0 / n_classes))
equalise_cls_loss = equalise_cls_loss.mean() * n_classes
equalise_cls_loss = equalise_cls_loss * unsup_mask.mean(dim=0)
unsup_loss += equalise_cls_loss * cls_balance
return unsup_loss, conf_mask_count, unsup_mask_count
def f_train(X_src, y_src, X_tgt0, X_tgt1):
X_src = torch.tensor(X_src, dtype=torch.float, device=torch_device)
y_src = torch.tensor(y_src, dtype=torch.long, device=torch_device)
X_tgt0 = torch.tensor(X_tgt0, dtype=torch.float, device=torch_device)
X_tgt1 = torch.tensor(X_tgt1, dtype=torch.float, device=torch_device)
student_optimizer.zero_grad()
student_net.train()
teacher_net.train()
src_logits_out = student_net(X_src)
student_tgt_logits_out = student_net(X_tgt0)
student_tgt_prob_out = F.softmax(student_tgt_logits_out, dim=1)
teacher_tgt_logits_out = teacher_net(X_tgt1)
teacher_tgt_prob_out = F.softmax(teacher_tgt_logits_out, dim=1)
# Supervised classification loss
clf_loss = classification_criterion(src_logits_out, y_src)
unsup_loss, conf_mask_count, unsup_mask_count = compute_aug_loss(student_tgt_prob_out, teacher_tgt_prob_out)
loss_expr = clf_loss + unsup_loss * unsup_weight
loss_expr.backward()
student_optimizer.step()
teacher_optimizer.step()
n_samples = X_src.size()[0]
outputs = [float(clf_loss) * n_samples, float(unsup_loss) * n_samples]
return tuple(outputs)
print('Compiled training function')
def f_pred(X_sup):
X_var = torch.tensor(X_sup, dtype=torch.float, device=torch_device)
student_net.eval()
teacher_net.eval()
return (F.softmax(student_net(X_var), dim=1).detach().cpu().numpy(),
F.softmax(teacher_net(X_var), dim=1).detach().cpu().numpy())
def f_eval(X_sup, y_sup):
y_pred_prob_stu, y_pred_prob_tea = f_pred(X_sup)
y_pred_stu = np.argmax(y_pred_prob_stu, axis=1)
y_pred_tea = np.argmax(y_pred_prob_tea, axis=1)
return (float((y_pred_stu != y_sup).sum()), float((y_pred_tea != y_sup).sum()))
def f_pred_for_metrics(X_sup, y_sup):
y_pred_prob_stu, y_pred_prob_tea = f_pred(X_sup)
y_pred_stu = np.argmax(y_pred_prob_stu, axis=1)
y_pred_tea = np.argmax(y_pred_prob_tea, axis=1)
return (y_pred_stu, y_pred_tea, y_pred_prob_stu, y_pred_prob_tea)
print('Compiled evaluation function')
cmdline_helpers.ensure_containing_dir_exists(log_file)
# Report setttings
log(f'confidence_thresh={confidence_thresh}, teacher_alpha={teacher_alpha},\
unsup_weight={unsup_weight}, cls_balance={cls_balance}, learning_rate={learning_rate}, num_epochs={num_epochs}'
f'test_model={test_model}')
print('Training...')
sup_ds = data_source.ArrayDataSource([source_train_x_inner, source_train_y_inner], repeats=-1)
tgt_train_ds = data_source.ArrayDataSource([target_train_x_inner], repeats=-1)
train_ds = data_source.CompositeDataSource([sup_ds, tgt_train_ds]).map(augment)
train_ds = pool.parallel_data_source(train_ds)
if epoch_size == 'large':
n_samples = max(source_train_x_inner.shape[0], target_train_x_inner.shape[0])
elif epoch_size == 'small':
n_samples = min(source_train_x_inner.shape[0], target_train_x_inner.shape[0])
elif epoch_size == 'target':
n_samples = target_train_x_inner.shape[0]
n_train_batches = n_samples // batch_size
source_test_ds = data_source.ArrayDataSource([source_validation_x, source_validation_y])
target_test_ds = data_source.ArrayDataSource([target_validation_x, target_validation_y])
if seed != 0:
shuffle_rng = np.random.RandomState(seed)
else:
shuffle_rng = np.random
train_batch_iter = train_ds.batch_iterator(batch_size=batch_size, shuffle=shuffle_rng)
best_teacher_model_state = {k: v.cpu().numpy() for k, v in teacher_net.state_dict().items()}
best_conf_mask_rate = 0.0
best_src_test_err = 1.0
best_target_tea_err = 1.0
best_epoch = 0
count_no_improve = 0
count_no_improve_flag = False
t_training_1 = time.time()
for epoch in range(num_epochs):
t1 = time.time()
train_res = data_source.batch_map_mean(f_train, train_batch_iter, n_batches=n_train_batches)
train_clf_loss = train_res[0]
unsup_loss_string = 'unsup (tgt) loss={:.6f}'.format(train_res[1])
src_test_err_stu, src_test_err_tea = source_test_ds.batch_map_mean(f_eval, batch_size=batch_size * 2)
tgt_test_err_stu, tgt_test_err_tea = target_test_ds.batch_map_mean(f_eval, batch_size=batch_size * 2)
conf_mask_rate = train_res[-2]
unsup_mask_rate = train_res[-1]
if conf_mask_rate > best_conf_mask_rate:
best_conf_mask_rate = conf_mask_rate
improve = '*** '
best_teacher_model_state = {k: v.cpu().numpy() for k, v in teacher_net.state_dict().items()}
best_target_tea_err = tgt_test_err_tea
best_epoch = epoch
if count_no_improve_flag:
count_no_improve_flag = False
count_no_improve = 0
else:
improve = ''
count_no_improve_flag = True
count_no_improve += 1
unsup_loss_string = '{}, conf mask={:.3%}, unsup mask={:.3%}'.format(
unsup_loss_string, conf_mask_rate, unsup_mask_rate)
t2 = time.time()
log('{}Epoch {} took {:.2f}s: TRAIN clf loss={:.6f}, {}; '
'SRC TEST ERR={:.3%}, TGT TEST student err={:.3%}, TGT TEST teacher err={:.3%}'.format(
improve, epoch, t2 - t1, train_clf_loss, unsup_loss_string, src_test_err_stu, tgt_test_err_stu,
tgt_test_err_tea))
if count_no_improve >= PATIENCE:
break
t_training_2 = time.time()
if test_model:
t_inference_1 = time.time()
src_pred_stu, src_pred_tea, src_prob_stu, src_prob_tea = source_test_ds.batch_map_concat(f_pred_for_metrics, batch_size=batch_size * 2)
t_inference_2 = time.time()
tgt_pred_stu, tgt_pred_tea, tgt_prob_stu, tgt_prob_tea = target_test_ds.batch_map_concat(f_pred_for_metrics, batch_size=batch_size * 2)
src_stu_scores_dict, src_tea_scores_dict = create_metrics_results(source_validation_y, src_pred_stu, src_pred_tea, src_prob_stu, src_prob_tea)
tgt_stu_scores_dict, tgt_tea_scores_dict = create_metrics_results(target_validation_y, tgt_pred_stu, tgt_pred_tea, tgt_prob_stu, tgt_prob_tea)
inference_time_for_1000 = (t_inference_2-t_inference_1)/len(src_pred_stu)*1000
return src_stu_scores_dict, src_tea_scores_dict, tgt_stu_scores_dict, tgt_tea_scores_dict, round(t_training_2-t_training_1, 3), round(inference_time_for_1000, 4)
return best_target_tea_err, best_teacher_model_state, best_epoch
def calc_metrics(sup_y, sup_y_one_hot, pred_y, prob, class_labels):
scores_dict = {}
conf = confusion_matrix(sup_y, pred_y)
tpr_list = []
fpr_list = []
for label in range(conf.shape[0]):
tpr = conf[label][label] / sum(conf[label])
tpr_list.append(tpr)
fpr_numerator = sum([pred_row[label] for pred_row in conf]) - conf[label][label]
fpr_denominator = sum(sum(conf)) - sum(conf[label])
fpr_list.append(fpr_numerator / fpr_denominator)
scores_dict['tpr'] = np.round(np.mean(tpr_list), 4)
scores_dict['fpr'] = np.round(np.mean(fpr_list), 4)
scores_dict['acc'] = np.round(metrics.accuracy_score(sup_y, pred_y), 2)
scores_dict['roc_auc'] = np.round(metrics.roc_auc_score(sup_y, prob, multi_class='ovr'), 4)
scores_dict['precision'] = np.round(metrics.precision_score(sup_y, pred_y, average='macro'), 4)
pred_one_hot = label_binarize(pred_y, classes=class_labels)
scores_dict['recall_precision_auc'] = np.round(metrics.average_precision_score(sup_y_one_hot, pred_one_hot, average="macro"), 4)
scores_dict['err'] = float((pred_y != sup_y).mean())
return scores_dict
def create_metrics_results(sup_y, pred_stu, pred_tea, prob_stu, prob_tea):
class_labels = list(np.unique(sup_y))
sup_y_one_hot = label_binarize(sup_y, classes=class_labels)
stu_scores_dict = calc_metrics(sup_y, sup_y_one_hot, pred_stu, prob_stu, class_labels)
tea_scores_dict = calc_metrics(sup_y, sup_y_one_hot, pred_tea, prob_tea, class_labels)
return stu_scores_dict, tea_scores_dict
def get_arch(exp, arch):
if arch == '':
if exp in {'mnist_usps', 'usps_mnist'}:
arch = 'mnist-bn-32-64-256'
elif exp in {'svhn_mnist', 'mnist_svhn'}:
arch = 'mnist-bn-32-32-64-256'
if exp in {'cifar_stl', 'stl_cifar', 'syndigits_svhn', 'svhn_syndigits', 'svhn_mnist_rgb', 'mnist_svhn_rgb'}:
arch = 'mnist-bn-32-32-64-256-rgb'
return arch
def evaluate_exp(confidence_thresh, teacher_alpha, unsup_weight, cls_balance, learning_rate, arch=''):
arch = get_arch(exp, arch)
cv_source = StratifiedKFold(n_splits=INNER_K_FOLD, shuffle=True)
source_train_validation_list = []
for train_idx, validation_idx in cv_source.split(source_train_x, source_train_y):
source_dict = {}
train_data, validation_data = source_train_x[train_idx], source_train_x[validation_idx]
train_target, validation_target = source_train_y[train_idx], source_train_y[validation_idx]
source_dict['source_train_x'] = train_data
source_dict['source_train_y'] = train_target
source_dict['source_validation_x'] = validation_data
source_dict['source_validation_y'] = validation_target
source_train_validation_list.append(source_dict)
cv_target = StratifiedKFold(n_splits=INNER_K_FOLD, shuffle=True)
target_train_validation_list = []
for train_idx, validation_idx in cv_target.split(target_train_x, target_train_y):
target_dict = {}
train_data, validation_data = target_train_x[train_idx], target_train_x[validation_idx]
validation_target = target_train_y[validation_idx]
target_dict['target_train_x'] = train_data
target_dict['target_validation_x'] = validation_data
target_dict['target_validation_y'] = validation_target
target_train_validation_list.append(target_dict)
target_test_err_list = []
best_epoch_list = []
for cv_idx in range(INNER_K_FOLD):
print(f'start inner cv {cv_idx}')
log(f'start inner cv {cv_idx}')
source_train_x_inner = source_train_validation_list[cv_idx]['source_train_x']
source_train_y_inner = source_train_validation_list[cv_idx]['source_train_y']
source_validation_x = source_train_validation_list[cv_idx]['source_validation_x']
source_validation_y = source_train_validation_list[cv_idx]['source_validation_y']
target_train_x_inner = target_train_validation_list[cv_idx]['target_train_x']
target_validation_x = target_train_validation_list[cv_idx]['target_validation_x']
target_validation_y = target_train_validation_list[cv_idx]['target_validation_y']
if cv_idx == 0:
# Report dataset size
log('Dataset:')
log('SOURCE Train: X.shape={}, y.shape={}'.format(source_train_x_inner.shape, source_train_y_inner.shape))
log('SOURCE Val: X.shape={}, y.shape={}'.format(source_validation_x.shape, source_validation_y.shape))
log('TARGET Train: X.shape={}'.format(target_train_x_inner.shape))
log('TARGET Val: X.shape={}, y.shape={}'.format(target_validation_x.shape, target_validation_y.shape))
best_target_tea_err, best_teacher_model_state, best_epoch = build_and_train_model(
source_train_x_inner, source_train_y_inner, target_train_x_inner,
source_validation_x, source_validation_y, target_validation_x, target_validation_y,
confidence_thresh, teacher_alpha, unsup_weight, cls_balance, learning_rate,
arch)
target_test_err_list.append(best_target_tea_err)
best_epoch_list.append(best_epoch)
# Save network
if model_file != '':
cmdline_helpers.ensure_containing_dir_exists(model_file)
with open(model_file, 'wb') as f:
pickle.dump(best_teacher_model_state, f)
BEST_EPOCHS_LIST.append(int(np.mean(best_epoch_list)))
return -np.mean(target_test_err_list)
def rebuild_and_test_model(params, source_train_x, source_train_y, target_train_x, source_test_x,
source_test_y, target_test_x, target_test_y, arch=''):
log(f"Start rebuild on test set")
arch = get_arch(exp, arch)
best_epoch = np.max(BEST_EPOCHS_LIST)
log(f'best_epoch: {best_epoch}')
results = build_and_train_model(
source_train_x, source_train_y, target_train_x, source_test_x, source_test_y, target_test_x, target_test_y,
arch=arch, test_model=True, num_epochs=best_epoch, **params)
src_stu_scores_dict = results[0]
src_tea_scores_dict = results[1]
tgt_stu_scores_dict = results[2]
tgt_tea_scores_dict = results[3]
training_time = results[4]
inference_time = results[5]
log(f'src_stu_scores_dict: {src_stu_scores_dict}')
log(f'src_tea_scores_dict: {src_tea_scores_dict}')
log(f'tgt_stu_scores_dict: {tgt_stu_scores_dict}')
log(f'tgt_tea_scores_dict: {tgt_tea_scores_dict}')
log(f'training_time: {training_time}')
log(f'inference_time for 1000 instences: {inference_time}')
tgt_tea_scores_dict.update({'training_time': training_time, 'inference_time': inference_time, 'params': params})
tgt_scores = | pd.Series(tgt_tea_scores_dict) | pandas.Series |
import cv2
import face_recognition
import pickle
import os
import numpy as np
import pandas as pd
from datetime import datetime
from scipy.spatial import distance as dist
class Attendance(object):
def __init__(self):
self.ENCODINGS_PATH = "encodings.pkl"
self.ATTENDANCE_FILE = "./Attendance.xlsx"
self.EYE_AR_THRESH = 0.25
self.ATTENDANCE_DATA = pd.read_excel(self.ATTENDANCE_FILE)
if not os.path.exists(self.ENCODINGS_PATH):
self.ENCODINGS,self.NAMES = None, None
print("WARNING: Encodings File not found. Process may fail, please run the train script first.")
print("-----------------------------------------------------------------------------------------------------------------------------------")
else:
self.ENCODINGS,self.NAMES = self.read_encodings()
#Function to read encodings
def read_encodings(self):
data = pickle.loads(open(self.ENCODINGS_PATH, "rb").read())
data = np.array(data)
encodings = [d["encoding"] for d in data]
names=[d["name"] for d in data]
return encodings,names
# Function to calculate EAR Value
def eye_aspect_ratio(self,eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
ear = (A + B) / (2.0 * C)
return ear
# Function to mark attendance
def mark_attendance(self,name,landmarks):
left_eye = landmarks['left_eye']
right_eye = landmarks['right_eye']
leftEAR = self.eye_aspect_ratio(left_eye)
rightEAR = self.eye_aspect_ratio(right_eye)
# average the eye aspect ratio together for both eyes
ear = (leftEAR + rightEAR) / 2.0
if ear < self.EYE_AR_THRESH:
# Check if entry already exists or not
temp = self.ATTENDANCE_DATA.copy()
temp = temp[temp['Name'] == name]
if len(temp)<1:
# No entry exists, create new entry
data = {
'Name' : name,
'Check-In' : datetime.now(),
'Check-Out' : None
}
self.ATTENDANCE_DATA = self.ATTENDANCE_DATA.append(data,ignore_index = True)
print("INFO: Marked checkin for {}".format(name))
else:
# Entries exist, check if last entry has checked out or not
if | pd.isna(temp.iloc[-1]['Check-Out']) | pandas.isna |
# -*- coding: utf-8 -*-
import logging
import numpy as np
import pandas as pd
import scipy.stats
import statsmodels.stats.proportion
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import KBinsDiscretizer
from dku_data_drift.preprocessing import Preprocessor
from dku_data_drift.model_tools import format_proba_density
from dku_data_drift.model_drift_constants import ModelDriftConstants
logger = logging.getLogger(__name__)
class DriftAnalyzer(object):
def __init__(self, prediction_type=None):
self.prediction_type = prediction_type
self.drift_clf = RandomForestClassifier(n_estimators=100, random_state=1337, max_depth=13, min_samples_leaf=1)
self._original_df = None
self._new_df = None
self._drift_test_X = None
self._drift_test_Y = None
self._model_accessor = None
self.has_predictions = False
self.target = None
self.features_in_drift_model = None
self.sample_size = None
def get_prediction_type(self):
return self.prediction_type
def fit(self, new_df, model_accessor=None, original_df=None, target=None):
"""
Trains a classifier that attempts to discriminate between rows from the provided dataframe and
rows from the dataset originally used to evaluate the model
Returns (columns, classifier)
"""
logger.info("Preparing the drift model...")
if model_accessor is not None and original_df is not None:
raise ValueError('model_accessor and original_df can not be defined at the same time. Please choose one of them.')
if model_accessor is not None and original_df is None and target is None:
self._model_accessor = model_accessor
self.has_predictions = True
self.target = self._model_accessor.get_target_variable()
self.prediction_type = self._model_accessor.get_prediction_type()
original_df = self._model_accessor.get_original_test_df()
df = self.prepare_data_when_having_model(new_df, original_df)
elif model_accessor is None and original_df is not None and target is not None:
self.has_predictions = True
self.target = target
df = self.prepare_data_when_having_target(new_df, original_df)
elif model_accessor is None and original_df is not None and target is None:
df = self.prepare_data_when_without_target(new_df, original_df)
else:
raise NotImplementedError('You need to precise either a model accessor or an original df.')
preprocessor = Preprocessor(df, target=ModelDriftConstants.ORIGIN_COLUMN)
train, test = preprocessor.get_processed_train_test()
drift_train_X = train.drop(ModelDriftConstants.ORIGIN_COLUMN, axis=1)
drift_train_Y = np.array(train[ModelDriftConstants.ORIGIN_COLUMN])
self._drift_test_X = test.drop(ModelDriftConstants.ORIGIN_COLUMN, axis=1) # we will use them later when compute metrics
self._drift_test_Y = np.array(test[ModelDriftConstants.ORIGIN_COLUMN])
self.features_in_drift_model = drift_train_X.columns
logger.info("Fitting the drift model...")
self.drift_clf.fit(drift_train_X, drift_train_Y)
def prepare_data_when_having_model(self, new_df, original_df):
logger.info('Prepare data with model')
if self.target not in original_df:
raise ValueError('The original dataset does not contain target "{}".'.format(self.target))
self._new_df = new_df
self._original_df = original_df
original_df_without_target = original_df.drop(self.target, axis=1)
return self._prepare_data_for_drift_model(new_df, original_df_without_target)
def prepare_data_when_having_target(self, new_df, original_df):
logger.info('Prepare data with target for drift model')
if self.target not in new_df:
raise ValueError('The new dataset does not contain target "{}".'.format(self.target))
if self.target not in original_df:
raise ValueError('The original dataset does not contain target "{}".'.format(self.target))
self._new_df = new_df
self._original_df = original_df
new_df_without_target = new_df.drop(self.target, axis=1)
original_df_without_target = original_df.drop(self.target, axis=1)
return self._prepare_data_for_drift_model(new_df_without_target, original_df_without_target)
def prepare_data_when_without_target(self, new_df, original_df):
logger.info('Prepare data without target for drift model')
return self._prepare_data_for_drift_model(new_df, original_df)
def get_drift_metrics_for_webapp(self):
"""
Return a dict of metrics with a format to be easily used in frontend
"""
if self.features_in_drift_model is None or self.drift_clf is None:
logger.warning('drift_features and drift_clf must be defined')
return {}
logger.info("Computing drift metrics ...")
drift_accuracy, drift_accuracy_lower, drift_accuracy_upper, drift_test_pvalue = self.get_drift_score(output_raw_score=True)
feature_importance_metrics, riskiest_features = self._get_feature_importance_metrics()
if self.prediction_type == ModelDriftConstants.REGRRSSION_TYPE:
kde_dict = self._get_regression_prediction_kde()
fugacity_metrics = {}
label_list = []
elif self.prediction_type == ModelDriftConstants.CLASSIFICATION_TYPE:
logger.info("Compute classification drift metrics for classification")
kde_dict, fugacity_metrics, label_list = self._get_classification_prediction_metrics()
else:
raise ValueError('Prediction type not defined.')
return {'type': self.prediction_type,
'sample_size': self.sample_size,
'feature_importance': feature_importance_metrics,
'drift_accuracy': round(drift_accuracy, 3),
'drift_accuracy_lower': round(drift_accuracy_lower, 3),
'drift_accuracy_upper': round(drift_accuracy_upper, 3),
'drift_test_pvalue': round(drift_test_pvalue, 5),
'kde': kde_dict,
'fugacity': fugacity_metrics,
'label_list': label_list,
'riskiest_features': riskiest_features}
def _get_classification_prediction_metrics(self):
if not self.has_predictions:
raise ValueError('DriftAnalyzer needs a target.')
if self.prediction_type != ModelDriftConstants.CLASSIFICATION_TYPE:
raise ValueError('Can not use this function with a {} model.'.format(self.prediction_type))
if self._model_accessor is not None:
prediction_dict = self.get_predictions_from_original_model(limit=ModelDriftConstants.PREDICTION_TEST_SIZE)
predictions_by_class = {}
for label in prediction_dict.get(ModelDriftConstants.FROM_ORIGINAL).columns:
if 'proba_' in label:
original_proba = np.around(prediction_dict.get(ModelDriftConstants.FROM_ORIGINAL)[label].values, 2).tolist()
new_proba = np.around(prediction_dict.get(ModelDriftConstants.FROM_NEW)[label].values, 2).tolist()
predictions_by_class[label] = {ModelDriftConstants.FROM_ORIGINAL: original_proba, ModelDriftConstants.FROM_NEW: new_proba}
kde_dict = {}
for label in predictions_by_class.keys():
kde_original = format_proba_density(predictions_by_class.get(label).get(ModelDriftConstants.FROM_ORIGINAL))
kde_new = format_proba_density(predictions_by_class.get(label).get(ModelDriftConstants.FROM_NEW))
cleaned_label = label.replace('proba_', ModelDriftConstants.CLASS)
kde_dict[cleaned_label] = {ModelDriftConstants.FROM_ORIGINAL: kde_original, ModelDriftConstants.FROM_NEW: kde_new}
fugacity = self.get_classification_fugacity(reformat=True)
label_list = [label for label in fugacity[0].keys() if label != 'source']
return kde_dict, fugacity, label_list
else:
fugacity = self.get_classification_fugacity()
label_list = fugacity[ModelDriftConstants.CLASS].unique()
return None, fugacity, label_list
def _get_regression_prediction_kde(self):
if not self.has_predictions:
raise ValueError('No target was defined at fit phase.')
if self.prediction_type != ModelDriftConstants.REGRRSSION_TYPE:
raise ValueError('Can not use this function with a {} model.'.format(self.prediction_type))
prediction_dict = self.get_predictions_from_original_model(limit=ModelDriftConstants.PREDICTION_TEST_SIZE)
original_serie = prediction_dict.get(ModelDriftConstants.FROM_ORIGINAL).values
new_serie = prediction_dict.get(ModelDriftConstants.FROM_NEW).values
min_support = float(min(min(original_serie), min(new_serie)))
max_support = float(max(max(original_serie), max(new_serie)))
logger.info("Computed histogram support: [{},{}]".format(min_support, max_support))
kde_original = format_proba_density(original_serie, min_support=min_support, max_support=max_support)
kde_new = format_proba_density(new_serie, min_support=min_support, max_support=max_support)
kde_dict= {
'Prediction': {
ModelDriftConstants.FROM_ORIGINAL: kde_original,
ModelDriftConstants.FROM_NEW: kde_new,
"min_support": min_support,
"max_support": max_support
}
}
return kde_dict
def get_regression_fugacity(self):
"""
TODO refactor
"""
kde_dict = self._get_regression_prediction_kde()
new = kde_dict.get('Prediction').get('new')
old = kde_dict.get('Prediction').get('original')
old_arr = np.array(old).T
df = | pd.DataFrame(new, columns=['val_new', 'new_density']) | pandas.DataFrame |
"""
Data: Temeprature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
import SIO_modules as SIO_mod
from importlib import reload
reload(SIO_mod)
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx')
precip_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_Precip_data.csv')
PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1)
path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']])
precip_data['Date'] = pd.to_datetime(precip_data['DATE'], format='%Y-%m-%d')
ENSO_data_all = ENSO_data.append(ENSO_data_recent[323:], ignore_index = True)
PDO_data['DATE'] = | pd.to_datetime(PDO_data['Date'], format='%Y%m') | pandas.to_datetime |
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn import model_selection
from sklearn.model_selection import learning_curve
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.linear_model import ElasticNetCV
from sklearn.model_selection import KFold
from sklearn import linear_model
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV
from lightgbm import LGBMRegressor
import feature_list
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5), verbose=0):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv,
n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc="best")
return plt
def select_drop_standand(traindata, testdata, num):
# 选取特征
if num == 1:
selected, select_list = feature_list.select_feature1(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature1(testdata, False)
if num == 2:
selected, select_list = feature_list.select_feature2(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature2(testdata, False)
if num == 3:
selected, select_list = feature_list.select_feature3(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature3(testdata, False)
if num == 4:
selected, select_list = feature_list.select_feature4(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature4(testdata, False)
if num == 5:
selected, select_list = feature_list.select_feature5(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature5(testdata, False)
if num == 6:
selected, select_list = feature_list.select_feature6(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature6(testdata, False)
if num == 7:
selected, select_list = feature_list.select_feature7(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature7(testdata, False)
if num == 8:
selected, select_list = feature_list.select_feature8(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature8(testdata, False)
if num == 9:
selected, select_list = feature_list.select_feature9(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature9(testdata, False)
if num == 10:
selected, select_list = feature_list.select_feature10(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature10(testdata, False)
if num == 11:
selected, select_list = feature_list.select_feature11(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature11(testdata, False)
if num == 12:
selected, select_list = feature_list.select_feature12(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature12(testdata, False)
if num == 13:
selected, select_list = feature_list.select_feature13(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature13(testdata, False)
if num == 14:
selected, select_list = feature_list.select_feature14(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature14(testdata, False)
if num == 15:
selected, select_list = feature_list.select_feature15(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature15(testdata, False)
if num == 16:
selected, select_list = feature_list.select_feature16(traindata, True)
selected_testB_features, select_list_testB = feature_list.select_feature16(testdata, False)
selected.reset_index(drop=True, inplace=True)
selected_testB_features.reset_index(drop=True, inplace=True)
# 清理空行
selected_nonan = selected.dropna(axis=0, how='any')
train_targets = pd.DataFrame(selected_nonan['charge_energy'], columns=['charge_energy'])
train_nonan_features = selected_nonan.drop(['charge_energy'], axis=1)
train_test_features = pd.concat([train_nonan_features, selected_testB_features], axis=0)
train_test_features.reset_index(drop=True, inplace=True)
# 注意标准化方法,RobustScaler quantile_range=(25.0, 75.0) # 基于分位数标准化 features
select_list.remove('charge_energy')
x_scaler = RobustScaler()
y_scaler = RobustScaler()
n_X_train_test = x_scaler.fit_transform(np.array(train_test_features))
# n_y_train = y_scaler.fit_transform(np.log1p(np.array(train_targets))) # ln(x+1)变换
n_y_train = y_scaler.fit_transform(np.array(train_targets))
n_X_train_test_pd = pd.DataFrame(n_X_train_test, columns=select_list)
n_X_train_test_mer = n_X_train_test_pd.copy()
# 时间维稀疏矩阵
# chargemode_dummies = pd.get_dummies(train_test_features['charge_mode'], prefix='mode', prefix_sep='_')
# hour_dummies = pd.get_dummies(train_test_features['hour'], prefix='hour', prefix_sep='_')
# week_dummies = pd.get_dummies(train_test_features['week'], prefix='week', prefix_sep='_')
# month_dummies = pd.get_dummies(train_test_features['month'], prefix='month', prefix_sep='_')
# if 'phase' in select_list:
# phase_dummies = pd.get_dummies(train_test_features['phase'], prefix='phase', prefix_sep='_')
# n_X_train_test_mer = pd.concat([n_X_train_test_pd, chargemode_dummies, hour_dummies, week_dummies, month_dummies,phase_dummies], axis=1)
# n_X_train_test_mer.drop(['charge_mode', 'hour', 'week', 'month', 'phase'], axis=1, inplace=True)
# else:
# n_X_train_test_mer = pd.concat([n_X_train_test_pd, chargemode_dummies, hour_dummies, week_dummies, month_dummies], axis=1)
# n_X_train_test_mer.drop(['charge_mode', 'hour', 'week', 'month'], axis=1, inplace=True)
n_testB = n_X_train_test_mer.tail(selected_testB_features.shape[0])
n_X_train = n_X_train_test_mer.drop(n_testB.index.tolist())
return n_X_train, n_y_train, n_testB, y_scaler
ram_num = 5
kfolds = KFold(n_splits=10, shuffle=True, random_state=ram_num)
def cv_rmse(model, train, y_train):
rmse = np.sqrt(-cross_val_score(model, train, y_train, scoring="neg_mean_squared_error", cv = kfolds))
return(rmse)
def ridge_selector(k, X, y):
model = make_pipeline(RidgeCV(alphas = [k], cv=kfolds)).fit(X, y)
rmse = cv_rmse(model, X, y).mean()
return(rmse)
def lasso_selector(k, X, y):
model = make_pipeline(LassoCV(max_iter=1e7, alphas = [k],
cv = kfolds)).fit(X, y)
rmse = cv_rmse(model, X, y).mean()
return(rmse)
if __name__ == '__main__':
# 分别读取16个车的数据 features + target
readFile_carfeatures = []
readFile_testfeatures = []
car_train_list = []
car_test_list = []
filenum = 17
for i in range(1,filenum):
readFile_carfeatures.append('../dataset/feature/train_feature/car' + str(i) + '_features.csv')
for i in range(1,filenum):
readFile_testfeatures.append('../dataset/feature/test_feature/car' + str(i) + 'testB_features.csv')
# train features + target
for i in range(len(readFile_carfeatures)):
car_train = pd.read_csv(readFile_carfeatures[i], dtype={'charge_start_time': str, 'charge_end_time': str})
car_train_list.append(car_train)
# test features
for i in range(len(readFile_carfeatures)):
car_test = | pd.read_csv(readFile_testfeatures[i]) | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression
import pdb
from sklearn.metrics import *
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import itertools
import json
import pickle
class User:
def __init__(self, id):
self.id = id
self.positive = []
self.negative = []
def add_positive(self, movie_id):
self.positive.append(movie_id)
def add_negative(self, movie_id):
self.negative.append(movie_id)
def get_positive(self):
return self.positive
def get_negative(self):
return self.negative
np.random.seed(1)
class EventsGenerator:
NUM_OF_USERS = 1
def __init__(self, learning_data, buy_probability, opened):
self.learning_data = learning_data
self.buy_probability = buy_probability
self.users = []
self.NUM_OF_OPENED_MOVIES_PER_USER = opened
for id in range(1, self.NUM_OF_USERS+1):
self.users.append(User(id))
def run(self, pairwise=False):
# print (self.users, "hellp")
for user in self.users:
# print (self.learning_data.index)
opened_movies = np.random.choice(self.learning_data.index.values, self.NUM_OF_OPENED_MOVIES_PER_USER)
self.__add_positives_and_negatives_to(user, opened_movies)
return self.__build_events_data()
def __add_positives_and_negatives_to(self, user, opened_movies):
# print (opened_movies)
for movie_id in opened_movies:
if np.random.binomial(1, self.buy_probability.loc[movie_id]):
user.add_positive(movie_id)
else:
user.add_negative(movie_id)
def __build_events_data(self):
events_data = []
for user in self.users:
for positive_id in user.get_positive():
# print(positive_id)
tmp = self.learning_data.loc[positive_id].to_dict()
tmp['outcome'] = 1
events_data += [tmp]
for negative_id in user.get_negative():
tmp = self.learning_data.loc[negative_id].to_dict()
tmp['outcome'] = 0
events_data += [tmp]
# print(events_data)
return | pd.DataFrame(events_data) | pandas.DataFrame |
# this functino is to run the mlp on the 0.5s binned data created by Shashiks
# features: downloaded bytes amount is the feature to be updated.
import pandas as pd
import numpy as np
import os
import math
import argparse
from keras import Sequential
from keras.layers import Dense, BatchNormalization, Dropout, Conv1D, MaxPooling1D, Flatten
from sklearn.metrics import accuracy_score, average_precision_score, recall_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
# constant values
YT = 1
FB = 0
V360 = 1
VNormal = 0
# select the train and test ids according to this random state and the seen and unseen nature
def select_train_and_test_id(random_state, is_seen):
# a set means id sets ranging from the 1-10,21-20,...
if is_seen:
test_ind = list(np.arange(1, 51))
train_ind = list(np.arange(1, 51))
else:
num_of_ids_from_one_set = 3
all_videos = np.arange(1, 51)
test_ind = []
# make sure that each video id is present in the test set at least 2 times
for ind_set in range(5):
for sub_id in range(num_of_ids_from_one_set):
test_ind.append(ind_set * 10 + (sub_id + random_state))
train_ind = list(set(list(all_videos)) - set(list(test_ind)))
return train_ind, test_ind
# based on the train and test list get the original and synthesis dataset from the
# processed data
def truncate_train_list(temp_train_df, number_of_traces):
temp_train_df = shuffle(temp_train_df, random_state=123)
temp_train_df.reset_index(inplace=True, drop=True)
len_temp_train_df = temp_train_df.shape[0]
truncated_train_df = temp_train_df.loc[0:np.ceil(number_of_traces * len_temp_train_df) - 1, :]
return truncated_train_df
def get_synth_df(synth_df, indices, num_of_synth_samples):
temp_df_list = []
for i in (indices):
temp_df_list.append(
synth_df.loc[i * num_of_synth_samples:i * num_of_synth_samples + num_of_synth_samples - 1, :])
df = pd.concat(temp_df_list)
# print(df.shape[0])
return df
def get_processed_train_test_synth_orit(train_list, test_list, is_seen, number_of_traces, random_state, platform,path):
main_path = path
data_processed_types = ['original_data', 'synthesized_data']
video_type = ['V360', 'VNormal']
# split the dataset according to the seen condition. first 60% of traces are in the
# train set and the remaining are in the test set
if is_seen:
data = [[], [], [], []]
v_type_path_ori = main_path + '/original_data/' + platform + '/'
v_type_path_synth = main_path + '/synthesized_data/' + platform + '/'
ori_360 = pd.read_csv(v_type_path_ori + 'V360.csv')
ori_normal = pd.read_csv(v_type_path_ori + '/' + 'VNormal.csv')
synth_360 = | pd.read_csv(v_type_path_synth + '/' + 'V360.csv') | pandas.read_csv |
# Created by rahman at 14:51 2020-03-05 using PyCharm
import os
import random
import pandas as pd
import scipy
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
city = 'ny' #'ny'
DATAPATH = '../data/' + city + "/"
classifiers = {
'RF':(RandomForestClassifier, {"n_estimators": 101, "max_depth": 10}),
'GBM': (GradientBoostingClassifier,{'n_estimators':100, 'max_depth': 3}),
'AB':(AdaBoostClassifier, {"n_estimators": 101}),
'LR_SAG_L2penalty':(LogisticRegression, {'solver': 'sag'}),
'LR_liblinear_L2penalty': (LogisticRegression, {'solver': 'liblinear', 'penalty': 'l2'})}
def folder_setup(city):
'''setup folders for each city
Args:
city: city
Returns:
'''
if not os.path.exists('data/'+city):
os.mkdir('data/'+city)
if not os.path.exists('data/'+city+'/emb/'):
os.mkdir('data/'+city+'/emb/')
if not os.path.exists('data/'+city+'/feature/'):
os.mkdir('data/'+city+'/feature/')
#os.mkdir('data/'+city+'/process/')
if not os.path.exists('data/'+city+'/result/'):
os.mkdir('data/'+city+'/result/')
def isFriends(friends,a, b):
friends_a=friends[friends.u1==a].u2
return True if b in friends_a.values else False
def pickPairs(friends, i, SP, MAX_PAIRS,ulist):
'''picks friend and stranger pairs
Args:
friends: friends list (asymetric) [u1, u2]
i: iteration
SP: list of stranger pairs
MAX_PAIRS: number of existing friend pairs
ulist: randomly shuffled user list
Returns:
pairs: [u1,u2,label]
'''
#print " permutation", i
while len(ulist) >= 2:
a = ulist.pop()
b = ulist.pop()
if not isFriends(friends, a,b):
SP.append([a,b])
if len(SP)>=MAX_PAIRS:
return SP
else:
print ("friends found ", a,b)
return SP
def make_allPairs(pairsFile, u_list_file, DATAPATH, friendFile, makeStrangers):
'''gets friend and stranger pairs and writes to "clean_allPairs.csv"
Args:
friends: friends list (asymetric) [u1, u2] unordered pairs, duplicates exist
u_list_file: dataset from which to read uids
Returns:
pairs: [u1,u2,label]
'''
u_list = pd.read_csv(DATAPATH + u_list_file).index.values
friends = pd.read_csv(DATAPATH + friendFile)
# take only pairs {u1, u2} where u1<u2, because {u2, u1} also exist but is a duplicate
smallFriends=friends.loc[(friends.u1< friends.u2) & (friends.u1.isin(u_list))& (friends.u2.isin(u_list))].reset_index(drop=True)
smallFriends["label"] = 1
if makeStrangers:
MAX_PAIRS, SP = len(smallFriends.u1), []
#print MAX_PAIRS
i = 0
while len(SP) < MAX_PAIRS:
SP = pickPairs(friends, i, SP, MAX_PAIRS, random.sample(u_list, k=len(u_list)))
i += 1
#print SP
with open(DATAPATH + "strangers.csv", "wb") as f:
for pair in SP:
f.write(str(pair[0]) + ", " + str(pair[1]) + '\n')
strangers=pd.read_csv(DATAPATH+"strangers.csv", names=['u1', 'u2'])
strangers["label"]=0
allPairs=smallFriends.append(strangers, ignore_index=True)
#print "smallFriends.shape, strangers.shape", smallFriends.shape, strangers.shape, "allPairs.shape", allPairs.shape
assert(len(allPairs)==len(smallFriends)*2 == len(strangers)*2)
allPairs.to_csv(DATAPATH+ pairsFile)#, index=False)
return allPairs
def pair_construct(u_list, friendFile, downSample):
''' construct users pairs
Args:
u_list: user list
friends: file of DF of list of friends
pairFile: store here for future
downSample: Boolean True for word2vec features,
if False downsample later after calculation of overlap based features
Returns:
pair: u1, u2, label
'''
friends = pd.read_csv(DATAPATH + friendFile)
# positive i.e. friend pairs
pair_p = friends.loc[(friends.u1.isin(u_list)) & (friends.u2.isin(u_list))].copy()
# sampling negative pairs , i.e. strangers
pair_n = pd.DataFrame(pd.np.random.choice(u_list, 9 * pair_p.shape[0]), columns=['u1'])
pair_n['u2'] = pd.np.random.choice(u_list, 9 * pair_p.shape[0])
# remove dup user in pair
pair_n = pair_n.loc[pair_n.u1 != pair_n.u2]
# remove asymetric dups
pair_n = pair_n.loc[pair_n.u1 < pair_n.u2]
# remove dups
pair_n = pair_n.drop_duplicates().reset_index(drop=True)
# delete friends inside by setting the columns of the positive pairs to be indexes
pair_n = pair_n.loc[~pair_n.set_index(list(pair_n.columns)).index.isin(pair_p.set_index(list(pair_p.columns)).index)]
# now shuffle and reset the index
pair_n = pair_n.loc[ | pd.np.random.permutation(pair_n.index) | pandas.np.random.permutation |
"""
Misc tools for implementing data structures
"""
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
import itertools
from numpy.lib.format import read_array, write_array
import numpy as np
import pandas.algos as algos
import pandas.lib as lib
import pandas.tslib as tslib
from pandas.util import py3compat
import codecs
import csv
from pandas.util.py3compat import StringIO, BytesIO
from pandas.core.config import get_option
# XXX: HACK for NumPy 1.5.1 to suppress warnings
try:
np.seterr(all='ignore')
# np.set_printoptions(suppress=True)
except Exception: # pragma: no cover
pass
class PandasError(Exception):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
def isnull(obj):
'''
Replacement for numpy.isnan / -numpy.isfinite which is suitable
for use on object arrays.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
if lib.isscalar(obj):
return lib.checknull(obj)
from pandas.core.generic import PandasObject
if isinstance(obj, np.ndarray):
return _isnull_ndarraylike(obj)
elif isinstance(obj, PandasObject):
# TODO: optimize for DataFrame, etc.
return obj.apply(isnull)
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike(obj)
else:
return obj is None
isnull_new = isnull
def isnull_old(obj):
'''
Replacement for numpy.isnan / -numpy.isfinite which is suitable
for use on object arrays. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
if lib.isscalar(obj):
return lib.checknull_old(obj)
from pandas.core.generic import PandasObject
if isinstance(obj, np.ndarray):
return _isnull_ndarraylike_old(obj)
elif isinstance(obj, PandasObject):
# TODO: optimize for DataFrame, etc.
return obj.apply(isnull_old)
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike_old(obj)
else:
return obj is None
def use_inf_as_null(flag):
'''
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
flag: bool
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
'''
if flag == True:
globals()['isnull'] = isnull_old
else:
globals()['isnull'] = isnull_new
def _isnull_ndarraylike(obj):
from pandas import Series
values = np.asarray(obj)
if values.dtype.kind in ('O', 'S', 'U'):
# Working around NumPy ticket 1542
shape = values.shape
if values.dtype.kind in ('S', 'U'):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(values.ravel())
result[:] = vec.reshape(shape)
if isinstance(obj, Series):
result = Series(result, index=obj.index, copy=False)
elif values.dtype == np.dtype('M8[ns]'):
# this is the NaT pattern
result = values.view('i8') == tslib.iNaT
elif issubclass(values.dtype.type, np.timedelta64):
result = -np.isfinite(values.view('i8'))
else:
result = -np.isfinite(obj)
return result
def _isnull_ndarraylike_old(obj):
from pandas import Series
values = np.asarray(obj)
if values.dtype.kind in ('O', 'S', 'U'):
# Working around NumPy ticket 1542
shape = values.shape
if values.dtype.kind in ('S', 'U'):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj_old(values.ravel())
result[:] = vec.reshape(shape)
if isinstance(obj, Series):
result = | Series(result, index=obj.index, copy=False) | pandas.Series |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed by almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed by almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mizxed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# seee GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combinate)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multile additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize, size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them thought)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with | ensure_clean_store(setup_path) | pandas.tests.io.pytables.common.ensure_clean_store |
import time
import numpy as np
import pandas as pd
from molecules import mol_from_smiles
from molecules import add_property
from molecules import (
add_atom_counts, add_bond_counts, add_ring_counts)
from .config import get_dataset_info
from .filesystem import load_dataset
SCORES = ["validity", "novelty", "uniqueness"]
def dump_scores(config, scores, epoch):
filename = config.path('performance') / "scores.csv"
df = pd.DataFrame([scores], columns=SCORES)
if not filename.exists():
df.to_csv(filename)
is_max = True
else:
ref = pd.read_csv(filename, index_col=0)
is_max = scores[2] >= ref.uniqueness.max()
ref = pd.concat([ref, df], axis=0, sort=False, ignore_index=True)
ref.to_csv(filename)
return is_max
def retrieve_samples(config):
dfs = []
filenames = config.path('samples').glob('*_*.csv')
for filename in filenames:
dfs.append( | pd.read_csv(filename, index_col=0) | pandas.read_csv |
import sys
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Loading Messages and Categories from Destination Database
Arguments:
messages_filepath -> Path to the CSV containing file messages
categories_filepath -> Path to the CSV containing file categories
Output:
df -> Combined data containing messages and categories
"""
messages = | pd.read_csv(messages_filepath) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 28 09:33:11 2022
@author: rossgra
"""
import pandas as pd
import numpy as np
import csv
import glob
import os
Phase = "1H"
Computer = "personal"
if Computer == "work":
USB = "D"
os.chdir("C:/Users/rossgra/Box/OSU, CSC, CQC Project files/"+ Phase +"/Compiler_1_exact/Raw_Day/Raw_D_metrics")
else:
os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/"+Phase+"/Compiler_1_exact/Raw_D_metrics")
USB = "E"
Pump_Fuel_path = os.getcwd()
csv_Fuel = glob.glob(os.path.join(Pump_Fuel_path, "*.csv"))
HH_count = []
Fuel_Removed_for_24_Hours = []
for file in csv_Fuel:
with open(file, 'r') as f:
csv_reader = csv.reader(f)
for idx, row in enumerate(csv_reader):
if idx == 1:
ID_Number = row[1]
print(ID_Number)
elif 'Fuel Raw Data' in row:
data_start = idx
sensor_data = | pd.read_csv(file, skiprows=data_start) | pandas.read_csv |
from .data import CovidData
import datetime as dt
from matplotlib.offsetbox import AnchoredText
import pandas as pd
import seaborn as sns
import geopandas as gpd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def pan_duration(date):
"""Return the duration in days of the pandemic.
As calculated from the gov.uk API. It subtracts the first date entry
in the API data from the most recent date entry.
Args:
date (datetime): DataFrame column (i.e Series) containing date
field as downloaded from the gov.uk API by get_national_data()
method from CovidData Class.
Returns:
datetime: Duration of pandemic in days as datetime object.
"""
return (date[0] - date[-1]).days
def validate_input(df):
"""Check that input into the plotting functions is of the correct type.
Args:
df (Pandas DataFrame): this is intended to be the plotting parameter
Raises:
TypeError: if parameter is not a DataFrame
"""
# if for_function == 'deaths' or for_function == 'cases':
# expected_cols = {'cases_cumulative', 'cases_demographics',
# 'cases_newDaily', 'case_rate', 'date',
# 'death_Demographics', 'name', 'vac_firstDose',
# 'vac_secondDose'}
if not isinstance(df, pd.DataFrame):
raise TypeError('Parameter must be DataFrame, use get_regional_data'
+ ' method from CovidData class.')
# if set(df.columns) != expected_cols:
# raise ValueError('Incorrect features. Expecting output from'
# + ' get_regional_data method from CovidData class')
def my_path():
"""Find correct path at module level for geo_data files.
Returns:
[type]: [description]
"""
from pathlib import Path
base = Path(__file__).resolve().parent / 'geo_data'
return base
def daily_case_plot(df, pan_duration=pan_duration, save=False):
"""Create a matplotlib plot of case numbers in the UK.
Calculated over the duration of the pandemic.Display text information
giving the most recent daily number, the highest daily number and the
date recorded, the total cumulative
number of cases and the duration of the pandemic in days.
Args:
df (DataFrame): containing covid data retrieved from CovidData
class using get_national_data() or get_UK_data() method.
pan_duration (function, optional): Defaults to pan_duration.
save (bool, optional): set True to save plot. Defaults to False.
Returns:
- Matplotlib plot, styled using matplotlib template 'ggplot'
"""
# Create Variables we wish to plot
cases = df['case_newCases'].to_list()
date = df['date'].to_list()
cumulative = df['case_cumulativeCases'].to_list()
# Find date of highest number of daily cases
high, arg_high = max(cases), cases.index(max(cases))
high_date = date[arg_high].strftime('%d %b %Y')
duration = pan_duration(date=date)
# Create matplotlib figure and specify size
fig = plt.figure(figsize=(12, 10))
plt.style.use('ggplot')
ax = fig.add_subplot()
# Plot varibles
ax.plot(date, cases)
# Style and label plot
ax.set_xlabel('Date')
ax.set_ylabel('Cases')
ax.fill_between(date, cases,
alpha=0.3)
ax.set_title('Number of people who tested positive for Covid-19 (UK)',
fontsize=18)
at = AnchoredText(f"Most recent new cases\n{cases[0]:,.0f}\
\nMax new cases\n{high:,.0f}: {high_date}\
\nCumulative cases\n{cumulative[0]:,.0f}\
\nPandemic duration\n{duration} days",
prop=dict(size=16), frameon=True, loc='upper left')
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at)
ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, 0.0175), xycoords='figure fraction',
fontsize=12, color='#555555')
plt.style.use('ggplot')
if save:
plt.savefig(f"{date[0].strftime('%Y-%m-%d')}-case_numbers_plot");
plt.show()
def regional_plot_cases(save=False):
"""Plot regional case numbers on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
Returns:
Plot of regional case numbers on map of UK
"""
# Collect data
regions = CovidData().get_regional_data()
scotland = CovidData(nation='scotland').get_national_data()
wales = CovidData(nation='wales').get_national_data()
ni = CovidData(nation='northern ireland').get_national_data()
regions = regions.assign(case_newCases=regions['cases_newDaily'])
# Set date to plot
date_selector = regions['date'][0]
regions_date = regions.loc[regions['date'] == date_selector]
scotland_date = \
scotland.loc[scotland['date'] == date_selector,
['date', 'name', 'case_newCases']]
wales_date = wales.loc[wales['date'] == date_selector,
['date', 'name', 'case_newCases']]
ni_date = ni.loc[ni['date'] == date_selector,
['date', 'name', 'case_newCases']]
# Combine regional data into single dataframe
final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
axis=0)
file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except is not good practice, this should be changed
print('Ensure you have imported geo_data sub-folder')
geo_df['nuts118nm'] = \
geo_df['nuts118nm'].replace(['North East (England)',
'North West (England)',
'East Midlands (England)',
'West Midlands (England)',
'South East (England)',
'South West (England)'],
['North East', 'North West',
'East Midlands', 'West Midlands',
'South East', 'South West'])
merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
right_on="name")
# Column to plot
feature = 'case_newCases'
# Plot range
feature_min, feature_max = merged['case_newCases'].min(), \
merged['case_newCases'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title(f'Number of new cases per region {date_selector}',
fontdict={'fontsize': '18', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=feature_min,
vmax=feature_max))
fig.colorbar(sm)
# Create map
merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
edgecolor='0.8');
plt.show()
if save:
image = merged.plot(column=feature, cmap='Reds', linewidth=0.8,
ax=ax, edgecolor='0.8');
image.figure.savefig(f'{date_selector}-regional_cases_plot')
def regional_plot_rate(save=False):
"""Plot regional case rate per 100,000 on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
Returns:
Plot of regional case rate on map of UK.
"""
# Collect data
regions = CovidData().get_regional_data()
scotland = CovidData(nation='scotland').get_national_data()
wales = CovidData(nation='wales').get_national_data()
ni = CovidData(nation='northern ireland').get_national_data()
# Set date to plot
date_selector = regions['date'][5]
regions_date = regions.loc[regions['date'] == date_selector]
scotland_date = scotland.loc[scotland['date'] == date_selector,
['date', 'name', 'case_rate']]
wales_date = wales.loc[wales['date'] == date_selector,
['date', 'name', 'case_rate']]
ni_date = ni.loc[ni['date'] == date_selector,
['date', 'name', 'case_rate']]
# Combine regional data into single dataframe
final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
axis=0)
file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except should be changed, will do so in later interation
print('Ensure you have imported geo_data sub-folder')
geo_df['nuts118nm'] = \
geo_df['nuts118nm'].replace(['North East (England)',
'North West (England)',
'East Midlands (England)',
'West Midlands (England)',
'South East (England)',
'South West (England)'],
['North East', 'North West',
'East Midlands', 'West Midlands',
'South East', 'South West'])
merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
right_on="name")
# Column to plot
feature = 'case_rate'
# Plot range
feature_min, feature_max = merged['case_rate'].min(),\
merged['case_rate'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title('Regional rate per 100,000 (new cases)',
fontdict={'fontsize': '20', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=feature_min,
vmax=feature_max))
fig.colorbar(sm)
# Create map
merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
edgecolor='0.8');
plt.show()
if save:
image = merged.plot(column=feature, cmap='Reds', linewidth=0.8,
ax=ax, edgecolor='0.8');
image.figure.savefig(f'{date_selector}-regional_rate_plot')
def heatmap_cases(df):
"""Create heatmap of case numbers for duration of pandemic.
Args:
df (DataFrame): Covid case data retrieved by calling CovidData
class method.
Returns:
Seaborn heatmap plot of case numbers for each day of the pandemic.
"""
# Variables to plot
cases = df['case_newCases'].to_list()
date = df['date'].to_list()
# Create new DataFrame containing two columns: date and case numbers
heat_df = pd.DataFrame({'date': date, 'cases': cases}, index=date)
# Separate out date into year month and day
heat_df['year'] = heat_df.index.year
heat_df["month"] = heat_df.index.month
heat_df['day'] = heat_df.index.day
# Use groupby to convert data to wide format for heatmap plot
x = heat_df.groupby(["year", "month", "day"])["cases"].sum()
df_wide = x.unstack()
# Plot data
sns.set(rc={"figure.figsize": (12, 10)})
# Reverse colormap so that dark colours represent higher numbers
cmap = sns.cm.rocket_r
ax = sns.heatmap(df_wide, cmap=cmap)
ax.set_title('Heatmap of daily cases since start of pandemic',
fontsize=20)
ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, 0.01), xycoords='figure fraction',
fontsize=12, color='#555555')
plt.show()
def local_rate_plot(save=False):
"""Plot local case rate per 100,000 on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
Returns:
Plot of local case rate on map of UK
"""
# Find latest data
recent_date = CovidData().get_regional_data()
recent_date = recent_date['date'][5]
# Select latest data from local data
local = CovidData().get_local_data(date=recent_date)
date_selector = recent_date
local_date = local.loc[local['date'] == date_selector,
['date', 'name', 'case_rate']]
file_path = my_path() / "Local_Authority_Districts.shp"
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except should be changed, will do so in later interation
print('Ensure you have imported geo_data sub-folder')
local_date['name'] = \
local_date['name'].replace(['Cornwall and Isles of Scilly'],
['Cornwall'])
merged = geo_df.merge(local_date, how='outer',
left_on="lad19nm", right_on="name")
# Column to plot
feature = 'case_rate'
# Plot range
vmin, vmax = merged['case_rate'].min(), merged['case_rate'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title(f'Local rate per 100,000 {recent_date}',
fontdict={'fontsize': '20', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=vmin, vmax=vmax))
fig.colorbar(sm)
# Create map
merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax,
edgecolor='0.8')
plt.show()
if save:
image = merged.plot(column=feature, cmap='Reds', linewidth=0.2,
ax=ax, edgecolor='0.8');
image.figure.savefig(f'{date_selector}-local_rate_plot')
def local_cases_plot(save=False):
"""Plot local case numbers on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
"""
# Find latest data
recent_date = CovidData().get_regional_data()
recent_date = recent_date['date'][0]
# Select latest data from local data
local = CovidData().get_local_data(date=recent_date)
date_selector = recent_date
local_date = local.loc[local['date'] == date_selector,
['date', 'name', 'case_newDaily']]
file_path = my_path() / "Local_Authority_Districts.shp"
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except should be changed, will do so in later interation
print('Ensure you have imported geo_data sub-folder')
local_date['name'] = \
local_date['name'].replace(['Cornwall and Isles of Scilly'],
['Cornwall'])
merged = geo_df.merge(local_date, how='outer',
left_on="lad19nm", right_on="name")
# Column to plot
feature = 'case_newDaily'
# Plot range
vmin, vmax = merged['case_newDaily'].min(), \
merged['case_newDaily'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title(f'Number of new cases by local authority {recent_date}',
fontdict={'fontsize': '20', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=vmin, vmax=vmax))
fig.colorbar(sm)
# Create map
merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax,
edgecolor='0.8')
plt.show()
if save:
image = merged.plot(column=feature, cmap='Reds', linewidth=0.2,
ax=ax, edgecolor='0.8');
image.figure.savefig(f'{date_selector}-local_cases_plot')
def case_demographics(df):
"""Produce a plot of the age demographics of cases across England.
Args:
df (DataFrame): this must be the dataframe produced by the
get_regional_data method from the CovidData class
Returns:
Plot of case numbers broken down by age
"""
validate_input(df)
df_list = df.loc[:, ['cases_demographics', 'date']]
age_df = []
for i in range(df_list.shape[0]):
if df_list.iloc[i, 0]:
temp_df = pd.DataFrame(df_list.iloc[i, 0])
temp_df['date'] = df_list.iloc[i, 1]
temp_df = temp_df.pivot(values='rollingRate',
columns='age', index='date')
age_df.append(temp_df)
data = pd.concat(age_df)
data.index = pd.to_datetime(data.index)
data = \
data.assign(under_15=(data['00_04']+data['05_09']+data['10_14'])/3,
age_15_29=(data['15_19']+data['20_24']+data['25_29'])/3,
age_30_39=(data['30_34']+data['35_39'])/2,
age_40_49=(data['40_44']+data['45_49'])/2,
age_50_59=(data['50_54']+data['55_59'])/2)
data.drop(columns=['00_04', '00_59', '05_09', '10_14', '15_19', '20_24',
'25_29', '30_34', '35_39', '40_44', '45_49', '50_54',
'55_59', '60_64', '65_69', '70_74', '75_79', '80_84',
'85_89', '90+', 'unassigned'], inplace=True)
date = data.index[0].strftime('%d-%b-%y')
ready_df = data.resample('W').mean()
ready_df.plot(figsize=(15, 10), subplots=True, layout=(3, 3),
title=f'{date} - England case rate per 100,000 by age'
+ ' (weekly)')
plt.style.use('ggplot')
plt.show()
def vaccine_demographics(df):
"""Plot of the age demographics of third vaccine uptake across England.
Args:
df ([DataFrame]): this must be the dataframe produced by the
get_regional_data method from the CovidData class
Returns:
Plot of cumulative third vaccination numbers broken down by age.
"""
validate_input(df)
df_list = df.loc[:, ['vac_demographics', 'date']]
age_df = []
for i in range(df_list.shape[0]):
if df_list.iloc[i, 0]:
temp_df = pd.DataFrame(df_list.iloc[i, 0])
temp_df['date'] = df_list.iloc[i, 1]
temp_df =\
temp_df.pivot(values=
'cumVaccinationThirdInjectionUptakeByVaccinationDatePercentage',
columns='age', index='date')
age_df.append(temp_df)
data = pd.concat(age_df)
data.index = | pd.to_datetime(data.index) | pandas.to_datetime |
import os
import subprocess
import pickle
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sc
import pathlib
import threading
import concurrent.futures as cf
from scipy.signal import medfilt
import csv
import tikzplotlib
import encoders_comparison_tool as enc
import video_info as vi
from bj_delta import bj_delta, bj_delta_akima
# Colors in terminal
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
useage_log_suffix = "_useage.log"
psnr_log_suffix = "-psnr_logfile.txt"
ssim_log_suffix = "-ssim_logfile.txt"
vmaf_log_suffix = "-vmaf_logfile.txt"
videofiles = []
codecs = ["av1", "svtav1", "vp9", "x264", "x265", "vvc"]
codecs_short = {"av1": "AV1", "svtav1": "SVT-AV1", "vp9": "VP9", "x264": "x264", "x265": "x265", "vvc": "VVenC",}
sequences = ["Netflix Aerial yuv420p10le 60fps",
"ShakeNDry yuv420p 30fps",
"SunBath yuv420p10le 50fps",
"Tree Shade yuv420p10le 30fps",
"Sintel2 yuv420p10le 24fps",
]
preset = ["preset"]
top_dir = "/run/media/ondra/video/test2/"
# top_dir = "/run/media/ondra/61597e72-9c9f-4edd-afab-110602521f55/test2/"
graphics_dir = "graphs/"
sequences_short = {
"Netflix Aerial yuv420p10le 60fps": "Aerial",
"ShakeNDry yuv420p 30fps": "ShakeNDry",
"SunBath yuv420p10le 50fps": "SunBath",
"Tree Shade yuv420p10le 30fps": "Tree Shade",
"Sintel2 yuv420p10le 24fps": "Sintel2",
}
series_labels = {
'av1-cpu-used_3-': "AV1 cpu-used 3",
'av1-cpu-used_4-': "AV1 cpu-used 4",
'av1-cpu-used_5-': "AV1 cpu-used 5",
'av1-cpu-used_6-': "AV1 cpu-used 6",
'svtav1-preset_3-': "SVT-AV1 preset 3",
'svtav1-preset_5-': "SVT-AV1 preset 5",
'svtav1-preset_7-': "SVT-AV1 preset 7",
'svtav1-preset_9-': "SVT-AV1 preset 9",
'svtav1-preset_11-': "SVT-AV1 preset 11",
'svtav1-preset_13-': "SVT-AV1 preset 13",
'vp9-rc_0-': "VP9 RC 0",
'vp9-cpu-used_0-': "VP9 cpu-used 0",
'vp9-cpu-used_2-': "VP9 cpu-used 2",
'vp9-cpu-used_4-': "VP9 cpu-used 4",
# 'x264-preset_ultrafast-': "x264 ultrafast",
'x264-preset_fast-': "x264 fast",
'x264-preset_medium-': "x264 medium",
'x264-preset_slow-': "x264 slow",
'x264-preset_veryslow-': "x264 veryslow",
'x264-preset_placebo-': "x264 placebo",
'x265-preset_ultrafast-': "x265 ultrafast",
'x265-preset_fast-': "x265 fast",
'x265-preset_medium-': "x265 medium",
'x265-preset_slow-': "x265 slow",
'x265-preset_veryslow-': "x265 veryslow",
'vvc-preset_faster-': "VVenC faster",
'vvc-preset_fast-': "VVenC fast",
'vvc-preset_medium-': "VVenC medium",
}
psnr_lim = {
"Netflix Aerial yuv420p10le 60fps": (33, 47),
"ShakeNDry yuv420p 30fps": (33, 44),
"Sintel2 yuv420p10le 24fps": (40, 60),
"SunBath yuv420p10le 50fps": (35, 55),
"Tree Shade yuv420p10le 30fps": (35, 45),
}
ssim_lim = {
"Netflix Aerial yuv420p10le 60fps": (0.9, 1),
"ShakeNDry yuv420p 30fps": (0.9, 0.98),
"Sintel2 yuv420p10le 24fps": (0.98, 1),
"SunBath yuv420p10le 50fps": (0.94, 1),
"Tree Shade yuv420p10le 30fps": (0.92, 0.99),
}
msssim_lim = {
"Netflix Aerial yuv420p10le 60fps": (0.9, 1),
"ShakeNDry yuv420p 30fps": (0.92, 1),
"Sintel2 yuv420p10le 24fps": (0.98, 1),
"SunBath yuv420p10le 50fps": (0.94, 1),
"Tree Shade yuv420p10le 30fps": (0.96, 1),
}
vmaf_lim = {
"Netflix Aerial yuv420p10le 60fps": (60, 100),
"ShakeNDry yuv420p 30fps": (70, 100),
"Sintel2 yuv420p10le 24fps": (70, 100),
"SunBath yuv420p10le 50fps": (70, 100),
"Tree Shade yuv420p10le 30fps": (80, 100),
}
bitrate_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, 150),
"ShakeNDry yuv420p 30fps": (0, 200),
"Sintel2 yuv420p10le 24fps": (0, 45),
"SunBath yuv420p10le 50fps": (0, 150),
"Tree Shade yuv420p10le 30fps": (0, 200),
}
bitrate_lim_log = {
"Netflix Aerial yuv420p10le 60fps": (0.1, 1000),
"ShakeNDry yuv420p 30fps": (0.1, 1000),
"SunBath yuv420p10le 50fps": (0.1, 1000),
"Tree Shade yuv420p10le 30fps": (0.1, 1000),
"Sintel2 yuv420p10le 24fps": (0.1, 100),
}
processing_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, 50000),
"ShakeNDry yuv420p 30fps": (0, 8000),
"SunBath yuv420p10le 50fps": (0, 5000),
"Tree Shade yuv420p10le 30fps": (0, 12000),
"Sintel2 yuv420p10le 24fps": (0, 12000),
}
processing_lim_log = {
"Netflix Aerial yuv420p10le 60fps": (1, 1000),
"ShakeNDry yuv420p 30fps": (1, 10000),
"SunBath yuv420p10le 50fps": (1, 1000),
"Tree Shade yuv420p10le 30fps": (1, 1000),
"Sintel2 yuv420p10le 24fps": (1, 1000),
}
cpu_time_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, 200000),
"ShakeNDry yuv420p 30fps": (0, 60000),
"SunBath yuv420p10le 50fps": (0, 35000),
"Tree Shade yuv420p10le 30fps": (0, 70000),
"Sintel2 yuv420p10le 24fps": (0, 70000),
}
cpu_time_lim_log = {
"Netflix Aerial yuv420p10le 60fps": (0.1, 1000),
"ShakeNDry yuv420p 30fps": (0.1, 10000),
"SunBath yuv420p10le 50fps": (0.1, 1000),
"Tree Shade yuv420p10le 30fps": (0.1, 1000),
"Sintel2 yuv420p10le 24fps": (0.1, 1000),
}
cpu_fps_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, 200),
"ShakeNDry yuv420p 30fps": (0, 200),
"SunBath yuv420p10le 50fps": (0, 200),
"Tree Shade yuv420p10le 30fps": (0, 200),
"Sintel2 yuv420p10le 24fps": (0, 200),
}
decode_fps_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, None),
"ShakeNDry yuv420p 30fps": (0, 60),
"SunBath yuv420p10le 50fps": (0, 60),
"Tree Shade yuv420p10le 30fps": (0, 60),
"Sintel2 yuv420p10le 24fps": (0, 60),
}
BJ1_serie = "x264-preset_placebo-"
BD_xname = "avg_bitrate_mb"
BD_ynames = ["psnr_avg", "ssim_avg", "msssim_avg", "vmaf_avg"]
BD_names = []
for n in BD_ynames:
# BD_names.append("bd_" + n)
BD_names.append("bd_rate_" + n)
encode_excluded_states = ["measuring decode"]
speeds_table = {
"placebo": 0,
"slow": 3,
"slower": 2,
"veryslow": 1,
"medium": 4,
"fast": 5,
"faster": 6,
"veryfast": 7,
"superfast": 8,
"ultrafast": 9,
}
binaries = {
"ffprobe": "/usr/bin/ffprobe",
"ffmpeg": "/usr/bin/ffmpeg"
}
vi.set_defaults(binaries)
def video_stream_size(videofile_path):
if videofile_path.endswith(".266"):
return os.path.getsize(videofile_path[0:-4] + ".266") / 1024 #in KiB
log = videofile_path + ".stream_size"
if os.path.exists(log):
with open(log, "r") as f:
s = f.readline()
print("stream size hit!")
return float(s)
result = subprocess.run(
[
"ffmpeg",
"-hide_banner",
"-i", videofile_path,
"-map", "0:v:0",
"-c", "copy",
"-f", "null", "-"
],
capture_output=True,
text=True,
)
try:
size = (result.stderr.rsplit("\n")[-2].rsplit(" ")[0].rsplit(":")[1][0: -2])
s = float(size) # in KiB
with open(log, "w") as f:
f.write(str(s))
return s
except ValueError:
raise ValueError(result.stderr.rstrip("\n"))
def video_stream_length(videofile_path):
if videofile_path.endswith(".266"):
videofile = videofile_path[:-4] + ".mkv"
else:
videofile = videofile_path
log = videofile + ".stream_length"
if os.path.exists(log):
with open(log, "r") as f:
s = f.readline()
print("stream length hit!")
return float(s)
result = vi.video_length_seconds(videofile)
with open(log, "w") as f:
f.write(str(result))
return result
def video_stream_frames(videofile_path):
if videofile_path.endswith(".266"):
videofile = videofile_path[:-4] + ".mkv"
else:
videofile = videofile_path
log = videofile + ".stream_frames"
if os.path.exists(log):
with open(log, "r") as f:
s = f.readline()
print("stream framenum hit!")
return int(s)
result = vi.video_frames(videofile)
with open(log, "w") as f:
f.write(str(result))
return result
def series_label(key, sequence=None):
if sequence is None or sequence in key:
k = series_labels.keys()
for s in (s for s in k if s in key):
return series_labels[s]
raise KeyError
'''
def simple_plot(x, y, xlabel, ylabel, savefile, minxlim=True):
i1, ax1 = plt.subplots()
plt.plot(x, y)
ax1.set(xlabel=xlabel, ylabel=ylabel)
if minxlim:
ax1.set_xlim(left=min(x), right=max(x))
ax1.grid()
plt.savefig(f"{savefile}.svg")
plt.savefig(f"{savefile}.pgf")
tikzplotlib.save(f"{savefile}.tex")
plt.close(i1)
def composite_plot(mxy, mlegend, xlabel, ylabel, savefile, xlim=None, ylim=None):
i1, ax1 = plt.subplots()
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
plt.plot(x, y, label=mlegend[next(i)], marker="+")
ax1.set(xlabel=xlabel, ylabel=ylabel)
plt.legend()
if xlim is True:
ax1.set_xlim(left=min(x), right=max(x))
elif xlim is not None:
ax1.set_xlim(left=xlim[0], right=xlim[1])
if ylim is True:
ax1.set_ylim(bottom=min(y), top=max(y))
elif ylim is not None:
ax1.set_ylim(bottom=ylim[0], top=ylim[1])
ax1.grid()
p = os.path.split(savefile)
enc.create_dir(p[0] + '/svg/')
enc.create_dir(p[0] + '/png/')
enc.create_dir(p[0] + '/tex/')
plt.savefig(f"{p[0] + '/svg/' + p[1]}.svg")
plt.savefig(f"{p[0] + '/png/' + p[1]}.png")
tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
plt.close(i1)
def composite_plot_smooth(mxy, mlegend, xlabel, ylabel, savefile, xlim=None, ylim=None):
i1, ax1 = plt.subplots()
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
c = plt.scatter(x, y, label=mlegend[next(i)], marker="+")
colr = c.get_facecolor()[0]
lx = np.log(x)
p = sc.interpolate.Akima1DInterpolator(lx, y)
x_smooth = np.linspace(min(x), max(x), 1000)
y_smooth = p(np.log(x_smooth))
plt.plot(x_smooth, y_smooth, color=colr)
ax1.set(xlabel=xlabel, ylabel=ylabel)
plt.legend()
if xlim is True:
ax1.set_xlim(left=x.min(), right=x.max())
elif xlim is not None:
ax1.set_xlim(left=xlim[0], right=xlim[1])
if ylim is True:
ax1.set_ylim(bottom=y.min(), top=y.max())
elif ylim is not None:
ax1.set_ylim(bottom=ylim[0], top=ylim[1])
ax1.grid()
p = os.path.split(savefile)
enc.create_dir(p[0] + '/svg/')
enc.create_dir(p[0] + '/png/')
enc.create_dir(p[0] + '/tex/')
plt.savefig(f"{p[0] + '/svg/' + p[1]}.svg")
plt.savefig(f"{p[0] + '/png/' + p[1]}.png")
tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
plt.close(i1)
'''
def plot_graphs(data, sequence=None, codec=None):
if sequence is None and codec is None:
out = graphics_dir
elif sequence is None:
out = graphics_dir + codec + "/"
elif codec is None:
out = graphics_dir + sequences_short[sequence] + "/"
else:
out = graphics_dir + sequences_short[sequence] + "/" + codec + "/"
lower_right = 4
d = df_to_plot(data, "avg_bitrate_mb", "psnr_avg")
composite_plot(d, "Bitrate [Mbit/s]", "PSNR (YUV) [dB]", out + "psnr", xlim=bitrate_lim[sequence], ylim=psnr_lim[sequence], legend_loc=lower_right)
composite_plot(d, "Bitrate [Mbit/s]", "PSNR (YUV) [dB]", out + "psnr_log", ylim=psnr_lim[sequence], xlog=True, legend_loc=lower_right)
d = df_to_plot(data, "avg_bitrate_mb", "ssim_avg")
composite_plot(d, "Bitrate [Mbit/s]", "SSIM", out + "ssim", xlim=bitrate_lim[sequence], ylim=ssim_lim[sequence], legend_loc=lower_right)
# composite_plot(d, "Bitrate [Mbit/s]", "SSIM", out + "ssim_log", ylim=ssim_lim[sequence], xlog=True, legend_loc=lower_right)
d = df_to_plot(data, "avg_bitrate_mb", "msssim_avg")
composite_plot(d, "Bitrate [Mbit/s]", "MS-SSIM", out + "msssim", xlim=bitrate_lim[sequence], ylim=msssim_lim[sequence], legend_loc=lower_right)
# composite_plot(d, "Bitrate [Mbit/s]", "MS-SSIM", out + "msssim_log", ylim=msssim_lim[sequence], xlog=True, legend_loc=lower_right)
d = df_to_plot(data, "avg_bitrate_mb", "vmaf_avg")
composite_plot(d, "Bitrate [Mbit/s]", "VMAF", out + "vmaf", xlim=bitrate_lim[sequence], ylim=vmaf_lim[sequence], legend_loc=lower_right)
# composite_plot(d, "Bitrate [Mbit/s]", "VMAF", out + "vmaf_log", ylim=vmaf_lim[sequence], xlog=True, legend_loc=lower_right)
d = df_to_plot(data, "avg_bitrate_mb", "decode_time_fps")
composite_plot(d, "Bitrate [Mbit/s]", "Rychlost dekódování [frame/s]", out + "decode", ylim=(0, None), xlim=bitrate_lim_log[sequence], xlog=True)
d = df_to_plot(data, "avg_bitrate_mb", "total_time_fps")
composite_plot(d, "Bitrate [Mbit/s]", "Procesorový čas [s/frame]", out + "encode", ylim=(0.1, None), xlim=bitrate_lim_log[sequence], xlog=True, ylog=True)
def df_to_plot(data, x_name, y_name):
tables = [t[[x_name, y_name]].rename(columns={x_name: "x", y_name: "y"}).sort_values(by="x") for t in list(data["table"])]
l = list(data["label"])
s = list(data["speed"])
lt = zip(l, tables, s)
for m in lt:
setattr(m[1], "label", m[0])
setattr(m[1], "speed", m[2])
return tables
def df_to_plot2(data, x_name, y_name):
tables = [data[[x_name, y_name]].rename(columns={x_name: "x", y_name: "y"}).loc[data["codec"] == s].sort_values(by="x") for s in codecs]
lt = zip(codecs, tables)
for m in lt:
setattr(m[1], "label", codecs_short[m[0]])
return tables
#def composite_plot(data, xlabel, ylabel, savefile, xlim=None, ylim=None, log_inter=True, xlog=False, ylog=False, smooth=True, xlogscalar=False, ylogscalar=False, legend_loc=None, tikz_before=True):
#i1, ax1 = plt.subplots()
#if not (xlog or ylog):
#tikz_before = False
#if xlog:
#ax1.set_xscale('log')
#ax1.grid(True, which="both")
#if xlogscalar:
#ax1.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
#else:
#ax1.set_xscale('linear')
#ax1.grid(True)
#if ylog:
#ax1.set_yscale('log')
#ax1.grid(True, which="both")
#if ylogscalar:
#ax1.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
#else:
#ax1.set_yscale('linear')
#ax1.grid(True)
#for table in data:
#if smooth:
#c = plt.scatter(table.x, table.y, label=table.label, marker="+")
#colr = c.get_facecolor()[0]
#if log_inter:
#lx = np.log(table.x)
#p = sc.interpolate.Akima1DInterpolator(lx, table.y)
#x_smooth = np.logspace(np.log10(min(table.x)), np.log10(max(table.x)), 200)
#else:
#lx = table.x
#p = sc.interpolate.Akima1DInterpolator(lx, table.y)
#x_smooth = np.linspace(min(table.x), max(table.x), 200)
#y_smooth = p(np.log(x_smooth))
#plt.plot(x_smooth, y_smooth, color=colr)
#else:
#plt.plot(table.x, table.y, label=table.label, marker="+")
#ax1.set(xlabel=xlabel, ylabel=ylabel)
#if legend_loc is None:
#ax1.legend()
#else:
#ax1.legend(loc=legend_loc)
#if xlim is True:
#ax1.set_xlim(left=table.x.min(), right=table.x.max())
#elif xlim is not None:
#ax1.set_xlim(left=xlim[0], right=xlim[1])
#if ylim is True:
#ax1.set_ylim(bottom=table.y.min(), top=table.y.max())
#elif ylim is not None:
#ax1.set_ylim(bottom=ylim[0], top=ylim[1])
#p = os.path.split(savefile)
#enc.create_dir(p[0] + '/svg/')
#enc.create_dir(p[0] + '/png/')
#enc.create_dir(p[0] + '/tex/')
#if tikz_before:
#tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
#plt.savefig(f"{p[0] + '/svg/' + p[1]}.svg")
#plt.savefig(f"{p[0] + '/png/' + p[1]}.png")
#if not tikz_before:
#tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
#plt.close(i1)
def composite_plot(data, xlabel, ylabel, savefile, xlim=None, ylim=None, log_inter=True, xlog=False, ylog=False, smooth=True, xlogscalar=False, ylogscalar=False, legend_loc=None, tikz_before=True):
plt.figure()
plt.axis()
if not (xlog or ylog):
tikz_before = False
if xlog:
plt.xscale('log')
plt.grid(True, which="both")
# if xlogscalar:
# plt.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
else:
plt.xscale('linear')
plt.grid(True)
if ylog:
plt.yscale('log')
plt.grid(True, which="both")
# if ylogscalar:
# plt.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
else:
plt.yscale('linear')
plt.grid(True)
for table in data:
if smooth:
c = plt.scatter(table.x, table.y, label=table.label, marker="+")
colr = c.get_facecolor()[0]
if log_inter:
lx = np.log(table.x)
p = sc.interpolate.Akima1DInterpolator(lx, table.y)
x_smooth = np.logspace(np.log10(min(table.x)), np.log10(max(table.x)), 200)
else:
lx = table.x
p = sc.interpolate.Akima1DInterpolator(lx, table.y)
x_smooth = np.linspace(min(table.x), max(table.x), 200)
y_smooth = p(np.log(x_smooth))
plt.plot(x_smooth, y_smooth, color=colr)
else:
plt.plot(table.x, table.y, label=table.label, marker="+")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend(loc=legend_loc)
if xlim is True:
plt.xlim(left=table.x.min(), right=table.x.max())
elif xlim is not None:
plt.xlim(left=xlim[0], right=xlim[1])
if ylim is True:
plt.ylim(bottom=table.y.min(), top=table.y.max())
elif ylim is not None:
plt.ylim(bottom=ylim[0], top=ylim[1])
p = os.path.split(savefile)
enc.create_dir(p[0] + '/svg/')
enc.create_dir(p[0] + '/png/')
enc.create_dir(p[0] + '/tex/')
if tikz_before:
tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
plt.savefig(f"{p[0] + '/svg/' + p[1]}.svg")
plt.savefig(f"{p[0] + '/png/' + p[1]}.png")
if not tikz_before:
tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
plt.close()
def df_to_latex_table(values, save_path):
pass
def calc_bj(mxy_o, mlegend_o, bd_metric_legend, bd_rate_legend):
mxy = mxy_o.copy()
mlegend = mlegend_o.copy()
xy1 = mxy[mlegend.index(BJ1_serie)]
t1 = zip(*xy1)
x1, y1 = [list(t1) for t1 in t1]
mxy.remove(xy1)
mlegend.remove(BJ1_serie)
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
bd_metric = bj_delta(x1, y1, x, y, mode=0)
bd_rate = bj_delta(x1, y1, x, y, mode=1)
l = mlegend[next(i)]
print(f"{l}: BD-{bd_metric_legend}: {bd_metric}%")
print(f"{l}: BD-{bd_rate_legend}: {bd_rate}%")
def formatter1(x):
s = ('%1.2f' % x).replace(".",",") + "\,\%"
return s
def formatter2(x):
s = ('%1.2f' % x).replace(".",",") + "\%"
if x > 0:
s = "\cellcolor{red!25}" + s
elif x < 0:
s = "\cellcolor{green!25}" + s
return s
def calc_bj_cross_to_table(mxy_o, mlegend_o, bd_metric_legend, bd_rate_legend):
table_metric = pd.DataFrame(np.zeros((len(mlegend_o), len(mlegend_o))), columns=mlegend_o, index=mlegend_o)
table_rate = pd.DataFrame(np.zeros((len(mlegend_o), len(mlegend_o))), columns=mlegend_o, index=mlegend_o)
for mleg in mlegend_o:
mxy = mxy_o.copy()
mlegend = mlegend_o.copy()
xy1 = mxy[mlegend.index(mleg)]
t1 = zip(*xy1)
x1, y1 = [list(t1) for t1 in t1]
mxy.remove(xy1)
mlegend.remove(mleg)
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
bd_metric = bj_delta(x1, y1, x, y, mode=0)
bd_rate = bj_delta(x1, y1, x, y, mode=1)
l = mlegend[next(i)]
table_metric.loc[l, mleg] = bd_metric
table_rate.loc[l, mleg] = bd_rate
# print(table_metric.to_latex(float_format="%.2f", decimal=","))
# print(table_rate.to_latex(float_format="%.2f"))
return table_metric, table_rate
'''
def calc_bj_akima(dftable, x_name, y_name, bd_metric_legend, bd_rate_legend):
xy1 = mxy[mlegend.index(BJ1_serie)]
t1 = zip(*xy1)
x1, y1 = [list(t1) for t1 in t1]
mxy.remove(xy1)
mlegend.remove(BJ1_serie)
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
bd_metric = bj_delta_akima(x1, y1, x, y, mode=0)
bd_rate = bj_delta_akima(x1, y1, x, y, mode=1)
l = mlegend[next(i)]
print(f"{l}: BD-{bd_metric_legend}: {bd_metric}%")
print(f"{l}: BD-{bd_rate_legend}: {bd_rate}%")
'''
def calc_bj_akima(data, x_name, y_name, bd_metric_legend, bd_rate_legend):
df = data.copy()
for t in df.itertuples():
t.table.rename(columns={x_name: "x", y_name: "y"}).sort_values(by="x")
df
bd_metric = bj_delta_akima(x1, y1, x, y, mode=0)
bd_rate = bj_delta_akima(x1, y1, x, y, mode=1)
def read_table_kcolv(logpath):
with open(logpath, "r") as f:
firstline = next(f).rstrip(" \n")
columns = []
for x in firstline.rsplit(" "):
columns.append(x.rsplit(":")[0])
r = range(len(columns))
table = pd.read_table(logpath, names=columns, usecols=list(r), sep=" ",
converters={k: lambda x: (x.rsplit(":")[1]) for k in r})
return table.apply(pd.to_numeric)
class PSNR_values:
def __init__(self, logpath):
self.logpath = logpath
table = read_table_kcolv(self.logpath)
self.n = table.n
self.mse_avg = table.mse_avg
self.mse_y = table.mse_y
self.mse_u = table.mse_u
self.mse_v = table.mse_v
self.psnr_avg = table.psnr_avg
self.psnr_y = table.psnr_y
self.psnr_u = table.psnr_u
self.psnr_v = table.psnr_v
self.mse_avg_avg = np.average(self.mse_avg)
self.mse_y_avg = np.average(self.mse_y)
self.mse_u_avg = np.average(self.mse_u)
self.mse_v_avg = np.average(self.mse_v)
self.psnr_avg_avg = np.average(self.psnr_avg)
self.psnr_y_avg = np.average(self.psnr_y)
self.psnr_u_avg = np.average(self.psnr_u)
self.psnr_v_avg = np.average(self.psnr_v)
class SSIM_values:
def __init__(self, logpath):
self.logpath = logpath
names = ("n", "Y", "U", "V", "All", "unnorm")
table = pd.read_table(self.logpath, names=names, sep=" ",
converters={k: lambda x: (x.rsplit(":")[1]) for k in range(5)})
table.unnorm = table.unnorm.str.slice(start=1, stop=-1)
table = table.apply(pd.to_numeric)
self.n = table.n
self.Y = table.Y
self.U = table.U
self.V = table.V
self.All = table.All
self.unnorm = table.unnorm # unnorm = 10*log10(1-All)
self.Y_avg = np.average(self.Y)
self.U_avg = np.average(self.U)
self.V_avg = np.average(self.V)
self.All_avg = np.average(self.All)
self.unnorm_avg = np.average(self.unnorm)
class VMAF_values:
def __init__(self, logpath):
self.logpath = logpath
table = pd.read_table(logpath, sep=",")
table = table.loc[:, ~table.columns.str.contains('^Unnamed')]
self.table = table
self.vmaf_avg = table.vmaf.mean()
class Useage_values:
def __init__(self, logpath):
self.logpath = logpath
with open(logpath, "r") as log:
firstline = next(log)
self.row_names = firstline.rsplit(",")[0:-1]
table = pd.read_csv(self.logpath)
self.table = table
self.state_names = list(table.state.unique())
total_time = 0
total_cpu_time = 0
for state in [x for x in self.state_names if x not in encode_excluded_states]:
for row in self.row_names:
if row == "state":
pass
else:
arr = np.array(table[row][table.index[table['state'] == state]])
setattr(self, state + "_" + row, arr)
cpu_time_user = getattr(self, state + "_cpu_time_user")
cpu_time_user = np.append(np.array([0]), cpu_time_user)
cpu_time_system = getattr(self, state + "_cpu_time_system")
cpu_time_system = np.append(np.array([0]), cpu_time_system)
cpu_time_total = cpu_time_user + cpu_time_system
setattr(self, state + "_cpu_time_total", cpu_time_total)
cpu_time_diff = np.ediff1d(cpu_time_total)
time = np.append(np.array([0]), getattr(self, state + "_time"))
time_diff = np.ediff1d(time)
cpu_percent_calc = cpu_time_diff / time_diff
setattr(self, state + "_cpu_percent_calc", cpu_percent_calc)
total_time += time[-1]
total_cpu_time += cpu_time_total[-1]
self.total_time = total_time
self.total_cpu_time = total_cpu_time
cpu_time_diff = np.ediff1d(np.append(np.array([0]), np.array(table.cpu_time_user + table.cpu_time_system)))
time_diff = np.ediff1d(np.append(np.array([0]), np.array(table.time)))
cpu_time_int = np.sum(cpu_time_diff * time_diff)
self.cpu_usage_avg = cpu_time_int / total_time
self.max_RSS = self.table.RSS.max()
self.perc_RSS = self.table.RSS.quantile(0.9)
self.mean_RSS = self.table.RSS.mean()
self.med_RSS = self.table.RSS.median()
for row in self.row_names:
if row == "state":
pass
else:
arr = np.array(table[row][table.index[table['state'] == "measuring decode"]])
setattr(self, "decode_row_" + row, arr)
self.decode_time = self.decode_row_time[-1]
self.decode_cpu_time = self.decode_row_cpu_time_user[-1] + self.decode_row_cpu_time_system[-1]
class VideoFile:
def __init__(self, videofilepath):
self.videofilepath = videofilepath
self.basename = os.path.basename(videofilepath)
self.path_without_ext = os.path.splitext(videofilepath)[0]
if os.path.exists(self.path_without_ext + ".266"):
self.videofilepath = self.path_without_ext + ".266"
self.useage_log_path = self.path_without_ext + useage_log_suffix
if not os.path.isfile(self.useage_log_path):
print(f"File not found: {self.useage_log_path}")
self.useage_log_path = None
self.psnr_log_path = self.path_without_ext + psnr_log_suffix
if not os.path.isfile(self.psnr_log_path):
print(f"File not found: {self.psnr_log_path}")
self.psnr_log_path = None
self.ssim_log_path = self.path_without_ext + ssim_log_suffix
if not os.path.isfile(self.ssim_log_path):
print(f"File not found: {self.ssim_log_path}")
self.ssim_log_path = None
self.vmaf_log_path = self.path_without_ext + vmaf_log_suffix
if not os.path.isfile(self.vmaf_log_path):
print(f"File not found: {self.vmaf_log_path}")
self.vmaf_log_path = None
self.topserie = os.path.split(os.path.split(self.videofilepath)[0])[1]
# eg. /path/to/video/av1/cpu-used_4/ShakeNDry/ShakeNDry-crf10.mkv -> ShakeNDry
for c in codecs:
if c in videofilepath:
self.codec = c
s = os.path.split(os.path.split(self.videofilepath)[0])[0]
# eg. /path/to/video/av1/cpu-used_4/ShakeNDry -> /path/to/video/av1/cpu-used_4
self.serie = s[s.index(self.codec)+len(self.codec)+1:].replace("/", "-") + "-" + self.topserie
self.label = s[s.index(self.codec)+len(self.codec)+1:].replace("/", "-")
self.codec_serie = self.codec + "-" + self.serie
def load_log(self):
with cf.ThreadPoolExecutor() as executor:
if self.psnr_log_path is not None:
psnr = executor.submit(PSNR_values, self.psnr_log_path)
if self.ssim_log_path is not None:
ssim = executor.submit(SSIM_values, self.ssim_log_path)
if self.vmaf_log_path is not None:
vmaf = executor.submit(VMAF_values, self.vmaf_log_path)
if self.useage_log_path is not None:
useage = executor.submit(Useage_values, self.useage_log_path)
if self.psnr_log_path is not None:
self.psnr = psnr.result()
if self.ssim_log_path is not None:
self.ssim = ssim.result()
if self.vmaf_log_path is not None:
self.vmaf = vmaf.result()
if self.useage_log_path is not None:
self.useage = useage.result()
def load_stream_size(self):
self.bitstream_size = video_stream_size(self.videofilepath)
self.total_length = video_stream_length(self.videofilepath)
self.avg_bitrate = (8 * self.bitstream_size) * 1024 / self.total_length
self.avg_bitrate_mb = self.avg_bitrate / 1024 / 1024
self.total_frames = video_stream_frames(self.videofilepath)
self.total_length = video_stream_length(self.videofilepath)
self.total_time_fps = self.useage.total_time / self.total_frames
self.total_cpu_time_fps = self.useage.total_cpu_time / self.total_frames
self.decode_time_fps = self.total_frames / self.useage.decode_time
self.decode_cpu_time_fps = self.total_frames / self.useage.decode_cpu_time
def async_load(f):
print(f"{f.videofilepath}")
f.load_log()
f.load_stream_size()
print(f"{f.videofilepath}:\tPSNR: {f.psnr.psnr_avg_avg}\tSSIM: {f.ssim.All_avg}\tVMAF: {f.vmaf.vmaf_avg}")
return f
class DataSerie:
def __init__(self, serie):
self.serie = serie
try:
self.label = series_label(serie)
except KeyError:
self.label = serie
print(f"DataSerie: {serie}: {self.label}")
self.data = []
self.n = []
self.psnr_avg = []
self.mse_avg = []
self.ssim_avg = []
self.msssim_avg = []
self.vmaf_avg = []
self.cpu_time = []
self.total_time = []
self.cpu_time_fps = []
self.total_time_fps = []
self.decode_cpu_time_fps = []
self.decode_time_fps = []
self.avg_bitrate = []
self.avg_bitrate_mb = []
self.max_RSS = []
self.med_RSS = []
self.perc_RSS = []
def add_entry(self, entry):
self.data.append(entry)
self.frames = entry.total_frames
self.n.append(max(entry.psnr.n))
self.psnr_avg.append(entry.psnr.psnr_avg_avg)
self.mse_avg.append(entry.psnr.mse_avg)
self.ssim_avg.append(entry.ssim.All_avg)
self.msssim_avg.append(entry.vmaf.table.ms_ssim.mean())
self.vmaf_avg.append(entry.vmaf.vmaf_avg)
self.cpu_time.append(entry.useage.total_cpu_time)
self.total_time.append(entry.useage.total_time)
self.cpu_time_fps.append(entry.total_cpu_time_fps)
self.total_time_fps.append(entry.total_time_fps)
self.decode_cpu_time_fps.append(entry.decode_cpu_time_fps)
self.decode_time_fps.append(entry.decode_time_fps)
self.avg_bitrate.append(entry.avg_bitrate)
self.avg_bitrate_mb.append(entry.avg_bitrate_mb)
self.max_RSS.append(entry.useage.max_RSS)
self.med_RSS.append(entry.useage.med_RSS)
self.perc_RSS.append(entry.useage.perc_RSS)
def make_df(self):
d = {'n': self.n, 'psnr_avg': self.psnr_avg, 'mse_avg': self.mse_avg,
'ssim_avg': self.ssim_avg, 'msssim_avg': self.msssim_avg,
'vmaf_avg': self.vmaf_avg, 'cpu_time': self.cpu_time,
'total_time': self.total_time, 'cpu_time_fps': self.cpu_time_fps,
'total_time_fps': self.total_time_fps,
'decode_cpu_time_fps': self.decode_cpu_time_fps,
'decode_time_fps': self.decode_time_fps,
'avg_bitrate': self.avg_bitrate,
'avg_bitrate_mb': self.avg_bitrate_mb,
'max_RSS': self.max_RSS, 'med_RSS': self.med_RSS,
'perc_RSS': self.perc_RSS, }
self.table = pd.DataFrame(data=d)
setattr(self.table, "serie", self.serie)
setattr(self.table, "label", self.label)
class DataStr:
def __init__(self):
self.series = []
self.serie_names = []
self.codecs = []
self.sequences = []
self.labels = []
self.speed = []
self.codec_speed = []
def add_serie(self, serie):
self.series.append(serie.table)
self.serie_names.append(serie.table.serie)
self.codecs.append(serie.serie.split("-")[0])
self.sequences.append(serie.serie.split("-")[-1])
self.labels.append(serie.label)
speed = None
print(serie.table.serie)
k = speeds_table.keys()
for s in k:
if s in serie.table.serie:
speed = speeds_table[s]
s = serie.table.serie
if "-cpu-used_" in serie.table.serie and speed is None:
speed = int(s.rsplit("-cpu-used_")[1].rsplit("-")[0])
if "-preset_" in serie.table.serie and speed is None:
speed = int(s.rsplit("-preset_")[1].rsplit("-")[0])
self.speed.append(speed)
self.codec_speed.append(serie.serie.split("-")[0] + "-" + str(speed))
def make_df(self):
short_sequence_names = [sequences_short[i] for i in self.sequences]
d = {"serie": self.serie_names,
"codec": self.codecs,
"sequence": self.sequences,
"sequence_short": short_sequence_names,
"label": self.labels,
"speed": self.speed,
"codecspeed": self.codec_speed,
"table": self.series, }
self.table = pd.DataFrame(data=d).sort_values(["speed", "serie"])
self.table["reference"] = [BJ1_serie in s for s in list(self.table["serie"])]
self.bd_ref = self.table.loc[self.table["reference"] == True]
self.table["cpu_time_avg"] = 0.0
self.table["time_avg"] = 0.0
self.table["cpu_time_avg_rel"] = 100.0
self.table["time_avg_rel"] = 100.0
for serie in self.table.itertuples():
self.table.loc[serie.Index, "cpu_time_avg"] = serie.table.cpu_time.mean()
self.table.loc[serie.Index, "time_avg"] = serie.table.total_time.mean()
self.table.loc[serie.Index, "codec_short"] = codecs_short[serie.codec]
for sequence in sequences:
table = self.table.loc[(self.table["sequence"] == sequence) & (self.table["reference"] == False)]
bd_ref = self.bd_ref.loc[self.bd_ref["sequence"] == sequence]
for serie in table.itertuples():
self.table.loc[serie.Index, "cpu_time_avg_rel"] = 100 * serie.table.cpu_time.mean() / bd_ref.table.to_list()[0].cpu_time.mean()
self.table.loc[serie.Index, "time_avg_rel"] = 100 * serie.table.total_time.mean() / bd_ref.table.to_list()[0].total_time.mean()
for yname in BD_ynames:
v1 = bd_ref.table.to_list()[0].loc[:, [BD_xname, yname]].sort_values(by=BD_xname)
v2 = serie.table.loc[:, [BD_xname, yname]].sort_values(by=BD_xname)
x1 = list(v1[BD_xname])
y1 = list(v1[yname])
x = list(v2[BD_xname])
y = list(v2[yname])
if np.any(~np.diff(y1).astype(bool)):
for i in range(len(y1)-1):
if(y1[i] == y1[i+1]):
y1[i+1] += 0.000001
if np.any(~np.diff(y).astype(bool)):
for i in range(len(y)-1):
if(y[i] == y[i+1]):
y[i+1] += 0.000001
self.table.loc[serie.Index, "bd_" + yname] = bj_delta_akima(x1, y1, x, y, mode=0)
self.table.loc[serie.Index, "bd_rate_" + yname] = bj_delta_akima(x1, y1, x, y, mode=1)
self.table = self.table.fillna(0.0)
def make_bj_plot(self):
self.table = self.table.sort_values(["sequence", "codec", "speed"])
for sequence in sequences:
print(sequence)
table = self.table.loc[datastr.table["sequence"] == sequence]
out = graphics_dir + sequences_short[sequence] + "/"
for yname in BD_names:
df = df_to_plot2(table, "cpu_time_avg_rel", yname)
#composite_plot(df, "Relativní průměrný procesorový čas [%]", "BD-rate [%]", out + yname + "_cpu", xlim=cpu_time_lim_log[sequence], xlog=True, smooth=False, xlogscalar=True)
df = df_to_plot2(table, "time_avg_rel", yname)
#composite_plot(df, "Relativní průměrný čas zpracování [%]", "BD-rate [%]", out + yname, xlim=processing_lim_log[sequence], xlog=True, smooth=False, xlogscalar=True)
#t = self.table.groupby(by="codecspeed").mean()
###############################################################################
# Code for graphing starts from here
###############################################################################
data_pickle = top_dir + "data.pkl"
if os.path.exists(data_pickle):
with open(data_pickle, 'rb') as inp:
videofiles_paths = pickle.load(inp)
else:
videofiles_paths = []
print("Finding log files:\n")
for directory in os.walk(top_dir):
if directory[2] != []:
for f in directory[2]:
if f.endswith(".mkv"):
videofiles_paths.append(VideoFile(os.path.join(directory[0], f)))
print("Reading log files:\n")
with cf.ProcessPoolExecutor(max_workers=12) as executor:
futures = tuple(executor.submit(async_load, f) for f in videofiles_paths)
videofiles_paths = []
for f in futures:
videofiles_paths.append(f.result())
with open(data_pickle, 'wb') as outp:
pickle.dump(videofiles_paths, outp)
#print("Rendering graphs:\n")
# simple_plot(np.array(np.array([0]),np.array(f.useage.table.time)),
# np.array(np.array([0]),np.array(f.useage.table.RSS/(1024*1024))),
# "Time [s]", "RAM usage (RSS) [MiB]", f.path_without_ext + "_RSS")
print("Rendering composite graphs:\n")
series = []
codec_series = []
used_codecs = []
for f in videofiles_paths:
series.append(f.serie)
codec_series.append(f.codec_serie)
used_codecs.append(f.codec)
series = list(set(series))
codec_series = list(set(codec_series))
used_codecs = list(set(used_codecs))
print(series)
print(codec_series)
print(used_codecs)
data = []
for serie in codec_series:
data.append(DataSerie(serie))
for f in (f for f in videofiles_paths if serie == f.codec_serie):
data[-1].add_entry(f)
for serie in data:
serie.make_df()
# sort by serie name
l, data = zip(*sorted(zip([d.serie for d in data], data)))
del l
print(data[0].avg_bitrate)
print(data[0].msssim_avg)
datastr = DataStr()
for serie in data:
datastr.add_serie(serie)
datastr.make_df()
datastr.make_bj_plot()
# datastr.table.loc[datastr.table["codec"] == "av1"]
# vybrat jen řádky, kterém mají ve sloupci tuto hodnotu
# d = data[0].table[["avg_bitrate_mb", "psnr_avg"]].sort_values(by=["avg_bitrate_mb"])
print("\nplotting by sequence, codec")
for sequence in sequences:
print(sequence)
table = datastr.table.loc[datastr.table["sequence"] == sequence]
# plot_graphs(table, sequence=sequence)
for codec in codecs:
table2 = table.loc[table["codec"] == codec]
# plot_graphs(table2, sequence=sequence, codec=codec)
rdfb_list = []
print("\nplotting by sequence, and most slow in codec")
for sequence in sequences:
print(sequence)
rdf_list = []
table = datastr.table.loc[datastr.table["sequence"] == sequence]
for codec in codecs:
table2 = table.loc[table["codec"] == codec]
rdf_list.append(table2.loc[table2["speed"].idxmin()])
table2 = | pd.concat(rdf_list) | pandas.concat |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test case will test matching sofr maturity with libor leg and flipping legs to get right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal( | pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]) | pandas.Series |
import pandas as pd
def date_formatter(time_stamp,ldf):
"""
Given a numpy timestamp and ldf, inspects which date granularity is appropriate and reformats timestamp accordingly
Example
----------
For changing granularity the results differ as so.
days: '2020-01-01' -> '2020-1-1'
months: '2020-01-01' -> '2020-1'
years: '2020-01-01' -> '2020'
Parameters
----------
time_stamp: np.datetime64
timestamp object holding the date information
ldf : lux.core.frame
LuxDataFrame with a temporal field
Returns
-------
date_str: str
A reformatted version of the time_stamp according to granularity
"""
datetime = pd.to_datetime(time_stamp)
if ldf.data_type["temporal"]:
date_column = ldf[ldf.data_type["temporal"][0]] # assumes only one temporal column, may need to change this function to recieve multiple temporal columns in the future
granularity = compute_date_granularity(date_column)
date_str = ""
if granularity == "year":
date_str += str(datetime.year)
elif granularity == "month":
date_str += str(datetime.year)+ "-" + str(datetime.month)
elif granularity == "day":
date_str += str(datetime.year) +"-"+ str(datetime.month) +"-"+ str(datetime.day)
else:
# non supported granularity
return datetime.date()
return date_str
def compute_date_granularity(date_column:pd.core.series.Series):
"""
Given a temporal column (pandas.core.series.Series), finds out the granularity of dates.
Example
----------
['2018-01-01', '2019-01-02', '2018-01-03'] -> "day"
['2018-01-01', '2019-02-01', '2018-03-01'] -> "month"
['2018-01-01', '2019-01-01', '2020-01-01'] -> "year"
Parameters
----------
date_column: pandas.core.series.Series
Column series with datetime type
Returns
-------
field: str
A str specifying the granularity of dates for the inspected temporal column
"""
date_fields = ["day", "month", "year"] #supporting a limited set of Vega-Lite TimeUnit (https://vega.github.io/vega-lite/docs/timeunit.html)
date_index = | pd.DatetimeIndex(date_column) | pandas.DatetimeIndex |
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import altair as alt
from requests import get
import re
import os
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import datetime
import time
import matplotlib.pyplot as plt
import statsmodels.api as sm
from geopy.geocoders import Nominatim
from geopy.distance import geodesic
geolocator = Nominatim(user_agent='myuseragent')
import lxml
import plotly.express as px
from PIL import Image
#with open("styles/style.css") as f:
# st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
st.set_page_config(
page_title="O/U Hockey Analytics",
page_icon=":ice_hockey_stick_and_puck:"
)
#Dummy data to get the header to display correctly
st.markdown("""<Head>
<Title> Test Title</Title><link rel="shortcut icon" href="favicon.ico" type="image/x-icon"> </Head>""",unsafe_allow_html=True)
#Title/Header
st.markdown("""<h1 style="text-align:center;color:white;font-weight:bolder;font-size:70px;font-family:helvetica; background:
-webkit-linear-gradient(#a73305,#000000,#093ff0); -webkit-background-clip:
text;-webkit-text-fill-color: transparent;">NHL<br>Wager<br>Analytics</h1>""",unsafe_allow_html=True)
# Load data
data_load_state = st.text('Checking and Fetching Data...')
#####################################
#### Data Gathering and Cleaning ####
#####################################
master_df = | pd.read_csv('master_df.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a streaming dataframe.
"""
import pickle
import os
from io import StringIO, BytesIO
from inspect import isfunction
import numpy
import numpy.random as nrandom
import pandas
from pandas.testing import assert_frame_equal
from pandas.io.json import json_normalize
from .dataframe_split import sklearn_train_test_split, sklearn_train_test_split_streaming
from .dataframe_io_helpers import enumerate_json_items, JsonIterator2Stream
class StreamingDataFrameSchemaError(Exception):
"""
Reveals an issue with inconsistant schemas.
"""
pass
class StreamingDataFrame:
"""
Defines a streaming dataframe.
The goal is to reduce the memory footprint.
The class takes a function which creates an iterator
on :epkg:`dataframe`. We assume this function can
be called multiple time. As a matter of fact, the
function is called every time the class needs to walk
through the stream with the following loop:
::
for df in self: # self is a StreamingDataFrame
# ...
The constructor cannot receive an iterator otherwise
this class would be able to walk through the data
only once. The main reason is it is impossible to
:epkg:`*py:pickle` (or :epkg:`dill`)
an iterator: it cannot be replicated.
Instead, the class takes a function which generates
an iterator on :epkg:`DataFrame`.
Most of the methods returns either a :epkg:`DataFrame`
either a @see cl StreamingDataFrame. In the second case,
methods can be chained.
By default, the object checks that the schema remains
the same between two chunks. This can be disabled
by setting *check_schema=False* in the constructor.
The user should expect the data to remain stable.
Every loop should produce the same data. However,
in some situations, it is more efficient not to keep
that constraints. Draw a random @see me sample
is one of these cases.
:param iter_creation: function which creates an iterator or an
instance of @see cl StreamingDataFrame
:param check_schema: checks that the schema is the same
for every :epkg:`dataframe`
:param stable: indicates if the :epkg:`dataframe` remains the same
whenever it is walked through
"""
def __init__(self, iter_creation, check_schema=True, stable=True):
self._delete_ = []
if isinstance(iter_creation, (pandas.DataFrame, dict,
numpy.ndarray, str)):
raise TypeError(
"Unexpected type %r for iter_creation. It must "
"be an iterator." % type(iter_creation))
if isinstance(iter_creation, StreamingDataFrame):
self.iter_creation = iter_creation.iter_creation
self.stable = iter_creation.stable
else:
self.iter_creation = iter_creation
self.stable = stable
self.check_schema = check_schema
def is_stable(self, do_check=False, n=10):
"""
Tells if the :epkg:`dataframe` is supposed to be stable.
@param do_check do not trust the value sent to the constructor
@param n number of rows used to check the stability,
None for all rows
@return boolean
*do_check=True* means the methods checks the first
*n* rows remains the same for two iterations.
"""
if do_check:
for i, (a, b) in enumerate(zip(self, self)):
if n is not None and i >= n:
break
try:
assert_frame_equal(a, b)
except AssertionError: # pragma: no cover
return False
return True
else:
return self.stable
def get_kwargs(self):
"""
Returns the parameters used to call the constructor.
"""
return dict(check_schema=self.check_schema)
def train_test_split(self, path_or_buf=None, export_method="to_csv",
names=None, streaming=True, partitions=None,
**kwargs):
"""
Randomly splits a :epkg:`dataframe` into smaller pieces.
The function returns streams of file names.
It chooses one of the options from module
:mod:`dataframe_split <pandas_streaming.df.dataframe_split>`.
@param path_or_buf a string, a list of strings or buffers, if it is a
string, it must contain ``{}`` like ``partition{}.txt``,
if None, the function returns strings.
@param export_method method used to store the partitions, by default
:epkg:`pandas:DataFrame:to_csv`, additional parameters
will be given to that function
@param names partitions names, by default ``('train', 'test')``
@param kwargs parameters for the export function and
:epkg:`sklearn:model_selection:train_test_split`.
@param streaming the function switches to a
streaming version of the algorithm.
@param partitions splitting partitions
@return outputs of the exports functions or two
@see cl StreamingDataFrame if path_or_buf is None.
The streaming version of this algorithm is implemented by function
@see fn sklearn_train_test_split_streaming. Its documentation
indicates the limitation of the streaming version and gives some
insights about the additional parameters.
"""
if streaming:
if partitions is not None:
if len(partitions) != 2:
raise NotImplementedError( # pragma: no cover
"Only train and test split is allowed, *partitions* "
"must be of length 2.")
kwargs = kwargs.copy()
kwargs['train_size'] = partitions[0]
kwargs['test_size'] = partitions[1]
return sklearn_train_test_split_streaming(self, **kwargs)
return sklearn_train_test_split(self, path_or_buf=path_or_buf,
export_method=export_method,
names=names, **kwargs)
@staticmethod
def _process_kwargs(kwargs):
"""
Filters out parameters for the constructor of this class.
"""
kw = {}
for k in ['check_schema']:
if k in kwargs:
kw[k] = kwargs[k]
del kwargs[k]
return kw
@staticmethod
def read_json(*args, chunksize=100000, flatten=False, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`json` file or buffer as an iterator
on :epkg:`DataFrame`. The signature is the same as
:epkg:`pandas:read_json`. The important parameter is
*chunksize* which defines the number
of rows to parse in a single bloc
and it must be defined to return an iterator.
If *lines* is True, the function falls back into
:epkg:`pandas:read_json`, otherwise it used
@see fn enumerate_json_items. If *lines* is ``'stream'``,
*enumerate_json_items* is called with parameter
``lines=True``.
Parameter *flatten* uses the trick described at
`Flattening JSON objects in Python
<https://towardsdatascience.com/flattening-json-objects-in-python-f5343c794b10>`_.
Examples:
.. runpython::
:showcode:
from io import BytesIO
from pandas_streaming.df import StreamingDataFrame
data = b'''{"a": 1, "b": 2}
{"a": 3, "b": 4}'''
it = StreamingDataFrame.read_json(BytesIO(data), lines=True)
dfs = list(it)
print(dfs)
.. runpython::
:showcode:
from io import BytesIO
from pandas_streaming.df import StreamingDataFrame
data = b'''[{"a": 1,
"b": 2},
{"a": 3,
"b": 4}]'''
it = StreamingDataFrame.read_json(BytesIO(data))
dfs = list(it)
print(dfs)
.. index:: IncompleteJSONError
The parsed json must have an empty line at the end otherwise
the following exception is raised:
`ijson.common.IncompleteJSONError: `
`parse error: unallowed token at this point in JSON text`.
"""
if not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError( # pragma: no cover
'chunksize must be a positive integer')
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
if isinstance(args[0], (list, dict)):
if flatten:
return StreamingDataFrame.read_df(
json_normalize(args[0]), **kwargs_create)
return StreamingDataFrame.read_df(args[0], **kwargs_create)
if kwargs.get('lines', None) == 'stream':
del kwargs['lines']
def localf(a0=args[0]):
if hasattr(a0, 'seek'):
a0.seek(0)
return enumerate_json_items(
a0, encoding=kwargs.get('encoding', None), lines=True,
flatten=flatten)
st = JsonIterator2Stream(localf)
args = args[1:]
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(
st, *args, chunksize=None, lines=True, **kwargs),
**kwargs_create)
def fct1(st=st, args=args, chunksize=chunksize, kw=kwargs.copy()):
st.seek(0)
for r in pandas.read_json(
st, *args, chunksize=chunksize, nrows=chunksize,
lines=True, **kw):
yield r
return StreamingDataFrame(fct1, **kwargs_create)
if kwargs.get('lines', False):
if flatten:
raise NotImplementedError(
"flatten==True is implemented with option lines='stream'")
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(*args, chunksize=None, **kwargs),
**kwargs_create)
def fct2(args=args, chunksize=chunksize, kw=kwargs.copy()):
for r in pandas.read_json(
*args, chunksize=chunksize, nrows=chunksize, **kw):
yield r
return StreamingDataFrame(fct2, **kwargs_create)
st = JsonIterator2Stream(
lambda a0=args[0]: enumerate_json_items(
a0, encoding=kwargs.get('encoding', None), flatten=flatten))
args = args[1:]
if 'lines' in kwargs:
del kwargs['lines']
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(
st, *args, chunksize=chunksize, lines=True, **kwargs),
**kwargs_create)
def fct3(st=st, args=args, chunksize=chunksize, kw=kwargs.copy()):
if hasattr(st, 'seek'):
st.seek(0)
for r in pandas.read_json(
st, *args, chunksize=chunksize, nrows=chunksize,
lines=True, **kw):
yield r
return StreamingDataFrame(fct3, **kwargs_create)
@staticmethod
def read_csv(*args, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`csv` file or buffer
as an iterator on :epkg:`DataFrame`.
The signature is the same as :epkg:`pandas:read_csv`.
The important parameter is *chunksize* which defines the number
of rows to parse in a single bloc. If not specified,
it will be equal to 100000.
"""
if not kwargs.get('iterator', True):
raise ValueError("If specified, iterator must be True.")
if not kwargs.get('chunksize', 100000):
raise ValueError("If specified, chunksize must not be None.")
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
kwargs['iterator'] = True
if 'chunksize' not in kwargs:
kwargs['chunksize'] = 100000
return StreamingDataFrame(lambda: pandas.read_csv(*args, **kwargs), **kwargs_create)
@staticmethod
def read_str(text, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`DataFrame` as an iterator on :epkg:`DataFrame`.
The signature is the same as :epkg:`pandas:read_csv`.
The important parameter is *chunksize* which defines the number
of rows to parse in a single bloc.
"""
if not kwargs.get('iterator', True):
raise ValueError("If specified, iterator must be True.")
if not kwargs.get('chunksize', 100000):
raise ValueError("If specified, chunksize must not be None.")
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
kwargs['iterator'] = True
if 'chunksize' not in kwargs:
kwargs['chunksize'] = 100000
if isinstance(text, str):
buffer = StringIO(text)
else:
buffer = BytesIO(text)
return StreamingDataFrame(
lambda: pandas.read_csv(buffer, **kwargs), **kwargs_create)
@staticmethod
def read_df(df, chunksize=None, check_schema=True) -> 'StreamingDataFrame':
"""
Splits a :epkg:`DataFrame` into small chunks mostly for
unit testing purposes.
@param df :epkg:`DataFrame`
@param chunksize number rows per chunks (// 10 by default)
@param check_schema check schema between two iterations
@return iterator on @see cl StreamingDataFrame
"""
if chunksize is None:
if hasattr(df, 'shape'):
chunksize = df.shape[0]
else:
raise NotImplementedError(
"Cannot retrieve size to infer chunksize for type={0}"
".".format(type(df)))
if hasattr(df, 'shape'):
size = df.shape[0]
else:
raise NotImplementedError( # pragma: no cover
"Cannot retrieve size for type={0}.".format(type(df)))
def local_iterator():
"local iterator"
for i in range(0, size, chunksize):
end = min(size, i + chunksize)
yield df[i:end].copy()
return StreamingDataFrame(local_iterator, check_schema=check_schema)
def __iter__(self):
"""
Iterator on a large file with a sliding window.
Each windows is a :epkg:`DataFrame`.
The method stores a copy of the initial iterator
and restores it after the end of the iterations.
If *check_schema* was enabled when calling the constructor,
the method checks that every :epkg:`DataFrame`
follows the same schema as the first chunck.
Even with a big chunk size, it might happen
that consecutive chunks might detect different type
for one particular column. An error message shows up
saying ``Column types are different after row``
with more information about the column which failed.
In that case, :epkg:`pandas:DataFrame.read_csv` can overwrite
the type on one column by specifying
``dtype={column_name: new_type}``. It frequently happens
when a string column has many missing values.
"""
iters = self.iter_creation()
sch = None
rows = 0
for it in iters:
if sch is None:
sch = (list(it.columns), list(it.dtypes))
elif self.check_schema:
if list(it.columns) != sch[0]: # pylint: disable=E1136
raise StreamingDataFrameSchemaError( # pragma: no cover
'Column names are different after row {0}\nFirst chunk: {1}'
'\nCurrent chunk: {2}'.format(
rows, sch[0], list(it.columns))) # pylint: disable=E1136
if list(it.dtypes) != sch[1]: # pylint: disable=E1136
errdf = pandas.DataFrame(
dict(names=sch[0], schema1=sch[1], # pylint: disable=E1136
schema2=list(it.dtypes))) # pylint: disable=E1136
tdf = StringIO()
errdf['diff'] = errdf['schema2'] != errdf['schema1']
errdf = errdf[errdf['diff']]
errdf.to_csv(tdf, sep=",", index=False)
raise StreamingDataFrameSchemaError(
'Column types are different after row {0}. You may use option '
'dtype={{"column_name": str}} to force the type on this column.'
'\n---\n{1}'.format(rows, tdf.getvalue()))
rows += it.shape[0]
yield it
@property
def shape(self):
"""
This is the kind of operations you do not want to do
when a file is large because it goes through the whole
stream just to get the number of rows.
"""
nl, nc = 0, 0
for it in self:
nc = max(it.shape[1], nc)
nl += it.shape[0]
return nl, nc
@property
def columns(self):
"""
See :epkg:`pandas:DataFrame:columns`.
"""
for it in self:
return it.columns
# The dataframe is empty.
return []
@property
def dtypes(self):
"""
See :epkg:`pandas:DataFrame:dtypes`.
"""
for it in self:
return it.dtypes
def to_csv(self, path_or_buf=None, **kwargs) -> 'StreamingDataFrame':
"""
Saves the :epkg:`DataFrame` into string.
See :epkg:`pandas:DataFrame.to_csv`.
"""
if path_or_buf is None:
st = StringIO()
close = False
elif isinstance(path_or_buf, str):
st = open( # pylint: disable=R1732
path_or_buf, "w", encoding=kwargs.get('encoding'))
close = True
else:
st = path_or_buf
close = False
for df in self:
df.to_csv(st, **kwargs)
kwargs['header'] = False
if close:
st.close()
if isinstance(st, StringIO):
return st.getvalue()
return path_or_buf
def to_dataframe(self) -> pandas.DataFrame:
"""
Converts everything into a single :epkg:`DataFrame`.
"""
return | pandas.concat(self, axis=0) | pandas.concat |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocessing import Pool
class NoiseGenerator:
def __init__(self, n_frequencies, f_interval):
self.f_interval = f_interval
self.t_end = 1 / self.f_interval
self.n_frequencies = n_frequencies
self.n_fft_frequencies = 2 * self.n_frequencies - 1
self.n_times = self.n_fft_frequencies
self.t_interval = self.t_end / self.n_times
self.nyquist_frequency = 0.5 * self.n_fft_frequencies * self.f_interval
self.positive_frequencies = np.arange(n_frequencies) * f_interval
self.sample_times = np.linspace(0, self.t_end, self.n_times, endpoint=False)
self.samples = None
self.fft_coeffs = None
self.fft_frequencies = np.fft.fftfreq(self.n_fft_frequencies, self.t_interval)
self.measured_psd = None
self.mean_square_fft_coeffs = None
self.autocorrelation = None
self.psd = None
self.fft_power_filter = None
self.fft_amplitude_filter = None
def specify_psd(self, psd='white', f_ir=None, normalization=None, A=None):
assert not ((normalization is not None) and (A is not None))
if A is None:
A = 1
if psd == 'white':
self.psd = A*np.ones(self.n_frequencies)
self.psd = np.hstack([self.psd, np.flip(self.psd)[:-1]])
elif psd == 'pink':
assert f_ir is not None
cutoff_idx = np.sum(self.positive_frequencies < f_ir)
self.psd = np.zeros(self.n_frequencies)
self.psd[cutoff_idx:] = A / self.positive_frequencies[cutoff_idx:]
self.psd[:cutoff_idx] = self.psd[cutoff_idx]
self.psd = np.hstack([self.psd, np.flip(self.psd)[:-1]])
else:
self.psd = psd
self.psd = | pd.Series(self.psd, index=self.fft_frequencies) | pandas.Series |
import numpy as np
import pandas as pd
import os
import librosa
from multiprocessing import Pool
SEED = int(1e9+7e7+17)
np.random.seed(SEED)
default_labels = ['blues']*100 + ['classical']*100 + ['country']*100 + ['disco']*100 + ['hiphop']*100 + ['jazz']*99 + ['metal']*100 + ['pop']*100 + ['reggae']*100 + ['rock']*100
genres = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock']
features = ['chroma_stft_mean', 'chroma_stft_var', 'rms_mean',
'rms_var', 'spectral_centroid_mean', 'spectral_centroid_var',
'spectral_bandwidth_mean', 'spectral_bandwidth_var', 'rolloff_mean',
'rolloff_var', 'zero_crossing_rate_mean', 'zero_crossing_rate_var',
'harmony_mean', 'harmony_var', 'perceptr_mean', 'perceptr_var', 'tempo',
'mfcc1_mean', 'mfcc1_var', 'mfcc2_mean', 'mfcc2_var', 'mfcc3_mean',
'mfcc3_var', 'mfcc4_mean', 'mfcc4_var', 'mfcc5_mean', 'mfcc5_var',
'mfcc6_mean', 'mfcc6_var', 'mfcc7_mean', 'mfcc7_var', 'mfcc8_mean',
'mfcc8_var', 'mfcc9_mean', 'mfcc9_var', 'mfcc10_mean', 'mfcc10_var',
'mfcc11_mean', 'mfcc11_var', 'mfcc12_mean', 'mfcc12_var', 'mfcc13_mean',
'mfcc13_var', 'mfcc14_mean', 'mfcc14_var', 'mfcc15_mean', 'mfcc15_var',
'mfcc16_mean', 'mfcc16_var', 'mfcc17_mean', 'mfcc17_var', 'mfcc18_mean',
'mfcc18_var', 'mfcc19_mean', 'mfcc19_var', 'mfcc20_mean', 'mfcc20_var']
musicnet_path = 'musicnet'
def rel_path_to_abs(file, rel_path):
return os.path.join(os.path.abspath(os.path.dirname(os.path.abspath(file))), rel_path)
# def normalize(track):
# m,s = track.mean(), track.std()
# return (track-m)/s
def normalize(track):
mx,mn = max(track), min(track)
m = (mx+mn)/2
return (track-m)/(mx-m)
class Loader:
def __init__(self, n_jobs=-1):
self.n_jobs = n_jobs if n_jobs>0 else os.cpu_count()
self.names = None
def load_tracks(self, path, n_jobs=-1, verbose=0, get_names=False, normalize=True):
n_jobs = self.n_jobs if n_jobs==-1 else n_jobs
dataset, names = self.__scan_folder__(path, n_jobs, verbose, True, normalize=normalize)
dataset = np.array(dataset)
self.names = names
return (dataset,names) if get_names else dataset
def __scan_folder__(self, path, n_jobs, verbose, get_names, normalize, blacklist=['jazz.00054.wav']):
tracks_paths = []
tmp_paths = []
tracks = []
tracks_names = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
tmp_paths.append(os.path.join(dirpath, filename))
tmp_paths.sort()
for music_path in tmp_paths:
filename = os.path.split(music_path)[1]
if filename in blacklist:
continue
tracks_names.append(filename)
tracks_paths.append(music_path)
if verbose==1:
print(filename)
with Pool(n_jobs) as p:
tracks = p.starmap(self.__load_track__, [(track, verbose, normalize) for track in tracks_paths])
return (tracks, tracks_names) if get_names else tracks
def __load_track__(self, path, verbose, _normalize):
X,sr = librosa.load(path)
if _normalize:
X = normalize(X)
if verbose==2:
print(os.path.split(path)[1])
return X
class Cutter:
def __init__(self, n_jobs=-1):
self.n_jobs = n_jobs if n_jobs>0 else os.cpu_count()
def cut_dataset(self, dataset, durations, sr=22050, n_jobs=-1, default_labels=None, normalize=True):
n_jobs = self.n_jobs if n_jobs==-1 else n_jobs
new_dataset = []
labels = []
self.normalize = normalize
for duration in durations:
if not default_labels:
new_dataset.extend(self.cut_album_in_pieces(dataset, duration, sr, n_jobs))
else:
new_data = self.cut_album_in_pieces(dataset, duration, sr, n_jobs, default_labels)
new_dataset.extend(new_data[0])
labels.extend(new_data[1])
new_dataset = np.array(new_dataset)
return new_dataset if not default_labels else (new_dataset, labels)
def cut_album_in_pieces(self, dataset, duration, sr=22050, n_jobs=-1, default_labels=None):
n_jobs = self.n_jobs if n_jobs==-1 else n_jobs
subtracks = []
labels = []
album = dataset.copy()
if len(album[0].shape)==0:
album = album.reshape((1,-1))
with Pool(n_jobs) as p:
if not default_labels:
new_data = p.starmap(self.cut_track_in_pieces, [(track, duration, sr) for track in album])
else:
new_data = p.starmap(self.cut_track_in_pieces, [(album[i], duration, sr, default_labels[i]) for i in range(len(album))])
for new_data_sample in new_data:
subtracks.extend(new_data_sample[0])
if not default_labels is None:
labels.extend([new_data_sample[1]]*len(new_data_sample[0]))
return subtracks if not default_labels else (subtracks, labels)
def cut_track_in_pieces(self, track, duration, sr=22050, label=None):
subtracks = []
if duration == 0:
raise Exception("Duration must be non-zero")
if duration < 0:
n_pieces = int((-1)/duration)
duration = track.shape[0]/sr/n_pieces
else:
n_pieces = int((track.shape[0]/sr)//duration)
for i in range(n_pieces):
_start, _stop = int(i*duration*sr), int((i+1)*duration*sr)
if self.normalize:
subtracks.append(normalize(track[_start:_stop]))
else:
subtracks.append(track[_start:_stop])
return (subtracks, label)
class MusicFeaturesExtractor:
def __init__(self, n_jobs=-1):
self.n_jobs = n_jobs if n_jobs>0 else os.cpu_count()
self.columns = features
def extract(self, dataset, n_jobs=-1):
###################### mono sound ##########################
n_jobs = self.n_jobs if n_jobs==-1 else n_jobs
if dataset.shape[0]==1:
return pd.DataFrame([self.__extract__(dataset[0])], columns=self.columns)
elif len(dataset[0].shape)==0:
return pd.DataFrame([self.__extract__(dataset)], columns=self.columns)
else:
with Pool(n_jobs) as p:
self.data_features = p.map(self.__extract__, dataset)#, chunksize=4)
data_features = pd.DataFrame(self.data_features, columns=self.columns)
return data_features
def extract_batch(self, data, batch_size=None):
X = None
if batch_size is None:
batch_size=max(1, data.shape[0]//100)
for start_index in range(0, data.shape[0], batch_size):
_start, _stop = start_index, start_index+batch_size
tmpX = self.extract(data[_start:_stop])
if X is None:
X = tmpX
else:
X = | pd.concat((X,tmpX), axis=0, ignore_index=True) | pandas.concat |
from smach_based_introspection_framework.offline_part.model_training import train_anomaly_classifier
from smach_based_introspection_framework._constant import (
anomaly_classification_feature_selection_folder,
)
from smach_based_introspection_framework.configurables import model_type, model_config, score_metric
from smach_based_introspection_framework.online_part.anomaly_classifier.Classifier import NormalDistributedConfidenceClassifier
import glob
import os
import pandas as pd
import pprint
import coloredlogs, logging
import sys, traceback
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
import json
import re
import numpy as np
import copy
at_extractor = re.compile(r'anomaly_type_\((.*)\)')
def get_models_of_scheme(scheme_folder, logger):
models_grouped_by_type = {}
for model_file in glob.glob(os.path.join(scheme_folder, 'anomaly_type_(*)', 'classifier_model')):
logger.info(model_file)
anomaly_type = at_extractor.search(model_file).group(1)
logger.info(anomaly_type)
with open(model_file, 'rb') as f:
models_grouped_by_type[anomaly_type] = joblib.load(f)
return models_grouped_by_type
def run():
logger = logging.getLogger('CollectClassificationStats')
scheme_folders = glob.glob(os.path.join(
anomaly_classification_feature_selection_folder,
'classifier_models',
'No.* filtering scheme',
))
for scheme_folder in scheme_folders:
logger.info(scheme_folder)
models_grouped_by_type = get_models_of_scheme(scheme_folder, logger)
c = NormalDistributedConfidenceClassifier(models_grouped_by_type)
path_postfix = os.path.relpath(scheme_folder, os.path.join(anomaly_classification_feature_selection_folder, 'classifier_models'))
anomaly_csvs = glob.glob(os.path.join(
anomaly_classification_feature_selection_folder,
path_postfix,
'anomalies_grouped_by_type',
'anomaly_type_(*)',
'*',
'*.csv',
))
stat_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 26 06:04:34 2017
A set of functions to analyze autosal conductivity files/data
@author: <NAME>
"""
# break into two
#docstrings
# keyword argument in calibration default = worm
import csv
import numpy as np
import pandas as pd
import sys
import os
def SaltLoad(saltFile):
""" Converts a autosal salinometer output file to a Pandas Dataframe.
Input:
- saltFile (file), an unextended file containing the output file
from the autosal salinometer. Contains columns/values such
as STATION NUMBER, CAST NUMBER,SAMPLENUMBER, CONDUCTIVITY RATIO,
etc.
Ex. saltFile = '/data/salt/ssscc'
Output:
- saltDF (Pandas Dataframe),Dataframe with 15 Columns containing the input data with
appropriate column names for the data.
Usage:
>>> saltDF = SaltLoad(saltFile)
"""
f = open(saltFile, newline='')
saltF = csv.reader(f,delimiter=' ', quoting=csv.QUOTE_NONE, skipinitialspace='True')
saltArray = []
for row in saltF:
saltArray.append(row)
del saltArray[0]
header = ['STNNBR','CASTNO','SAMPNO','BathTEMP','CRavg','autosalSAMPNO',\
'Unknown','StartTime','EndTime','Attempts','Reading1','Reading2',\
'Reading3', 'Reading4', 'Reading5']
f.close()
# make all rows of Salt files the same length as header
for row in saltArray:
if len(row) < len(header):
row.extend([np.NaN]*(len(header)-len(row)))
saltArray = np.array(saltArray) # change to np array
saltDF = pd.DataFrame(saltArray,columns=header) # change to DataFrame
saltDF = saltDF.apply(pd.to_numeric, errors='ignore')
return saltDF
def Cr_Calibration(saltDF,StandName='worm'):
"""
Cleans up a salinity dataframe as well as applied a time-dependent correction
for the offset associated with prolonged use of the autosal
Input:
-saltDF (pandas dataframe), a Dataframe containing the data from
the output of the autosal salinometer (usually the output of the
SaltLoad function)
-StandName, the value given to the standard seawater calibration
standard used to calibrate the Conductivity Ratio values.
Default Value: worm
Ex.shown in 5th column:
Column: 0 1 2 3 4 5
Standard: 0001 01 00 24 1.99967 worm 3807 04:40:13 04:40:13 01 1.99967
Sample 1: 0001 01 01 24 2.03956 1 3808 04:44:55 04:45:38 03 2.03938
Output:
-outputDF (pandas dataframe), a corrected Dataframe containing 4 columns:
Station Number, Cast Number, Sample Number, and a new Conductivity
Ratio which has the time-dependent offset from the salinometer
removed from it.
Usage:
outputDF = Cr_Calibration(saltDF)
"""
CrStrt = saltDF['CRavg'][saltDF.autosalSAMPNO==StandName][0] #First Standard Cr Value
CrEnd = saltDF['CRavg'][saltDF.autosalSAMPNO==StandName][len(saltDF['CRavg'])-1] #Second Standard Cr Value
#calculate start and end times (endtimes are when measurements are taken)
saltDF['EndTime'] = saltDF['EndTime'].apply(pd.Timedelta)
startTime = saltDF['EndTime'][0]
saltDF['ElapsedTime_(s)'] = (saltDF['EndTime']-startTime) / np.timedelta64(1,'s')
duration = saltDF['ElapsedTime_(s)'][len(saltDF['ElapsedTime_(s)'])-1]
Offset = CrEnd-CrStrt
Offset = Offset/duration # Offset per second
#Apply Offsets to Measured Data
saltDF['CRavg_Corr'] = saltDF['CRavg']-(Offset*saltDF['ElapsedTime_(s)'])
saltDF = saltDF[(saltDF['autosalSAMPNO']!=StandName)] #remove calibration entries
#Create Export Dataframe
outputDF = pd.DataFrame()
outputDF = saltDF.loc[:,['STNNBR','CASTNO','SAMPNO','CRavg_Corr']] #copy wanted columns to new DF
return outputDF
def saltCat(saltDir):
"""
Concatenates all corrected salt dataframes in the user-specified directory
and writes dataframe to a master salt .csv/.pkl file in that directory
Inputs:
- saltDir (Directory), the directory containing the corrected dataframes
for each file that is to be concatenated.
Outputs:
- a master salt .csv/.pkl file containing all a master dataframe saved
to the input directory
Usage:
saltCat('/data/salt')
"""
fileName = 'master_salt_DF.csv'
fileList = os.listdir(path=saltDir) #Creates list of files in the salt Directory
exten = '_corr.csv' # type of file to be parsed out into dataframe
extFiles = []
for i in range(len(fileList)-1): # Parse out files that have the wanted extension
if fileList[i][-9:] == exten:
extFiles.append(fileList[i])
masterDF = pd.DataFrame()
for i in extFiles: #concatenate all Dataframes together
catFrame = | pd.read_csv(i) | pandas.read_csv |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
weights1 = pd.DataFrame(vals, index=widx, columns=cols)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')],
names=("date", "contract"))
cols = pd.Index(["CO1"], name="generic")
weights2 = pd.DataFrame(1, index=widx, columns=cols)
wts_exp = {"CL": weights1, "CO": weights2}
assert_dict_of_frames(wts, wts_exp)
def test_reindex():
# related to https://github.com/matthewgilbert/mapping/issues/11
# no op
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
prices = pd.Series([103, 101, 102, 100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=0)
exp_prices = prices
assert_series_equal(exp_prices, new_prices)
# missing front prices error
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
prices = pd.Series([100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
with pytest.raises(ValueError):
util.reindex(prices, widx, 0)
# NaN returns introduced and filled
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')])
prices = pd.Series([100, 101, 102, 103, 104], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLF5'),
(TS('2015-01-05'), 'CLH5'),
(TS('2015-01-06'), 'CLF5'),
(TS('2015-01-06'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=1)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-01'), 'CLH5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLF5'),
(TS('2015-01-05'), 'CLH5'),
(TS('2015-01-06'), 'CLF5'),
(TS('2015-01-06'), 'CLH5')
])
exp_prices = pd.Series([100, np.NaN, 101, 102, 103, 104, 103,
104, np.NaN, np.NaN], index=idx)
assert_series_equal(exp_prices, new_prices)
# standard subset
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-01'), 'CHF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5'),
( | TS('2015-01-04') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Pipeline-GUI for Analysis with MNE-Python
@author: <NAME>
@email: <EMAIL>
@github: https://github.com/marsipu/mne_pipeline_hd
License: BSD (3-clause)
Written on top of MNE-Python
Copyright © 2011-2020, authors of MNE-Python (https://doi.org/10.3389/fnins.2013.00267)
inspired by <NAME>. (2018) (https://doi.org/10.3389/fnins.2018.00006)
"""
import inspect
import os
import shutil
from ast import literal_eval
from functools import partial
from importlib import util
from os import mkdir
from os.path import isdir, isfile, join
from pathlib import Path
from types import FunctionType
import pandas as pd
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import (QButtonGroup, QComboBox, QDialog, QFileDialog, QFormLayout, QGroupBox,
QHBoxLayout, QLabel, QLineEdit, QListView, QListWidget, QListWidgetItem,
QMessageBox, QPushButton, QSizePolicy, QStyle, QTabWidget, QVBoxLayout, QGridLayout,
QProgressBar, QCheckBox)
from mne_pipeline_hd import QS
from mne_pipeline_hd.gui import parameter_widgets
from mne_pipeline_hd.gui.base_widgets import CheckDictList, CheckList, EditDict, EditList, SimpleDialog, SimpleList
from mne_pipeline_hd.gui.gui_utils import CodeEditor, ErrorDialog, center, get_exception_tuple, set_ratio_geometry, \
get_std_icon, MainConsoleWidget
from mne_pipeline_hd.gui.models import CustomFunctionModel, RunModel
from mne_pipeline_hd.pipeline_functions.function_utils import QRunController
class RunDialog(QDialog):
def __init__(self, main_win):
super().__init__(main_win)
self.mw = main_win
self.init_controller()
self.init_ui()
set_ratio_geometry(0.6, self)
self.show()
self.start()
def init_controller(self):
self.rc = QRunController(run_dialog=self, controller=self.mw.ct,
pool=self.mw.mp_pool)
def init_ui(self):
layout = QVBoxLayout()
view_layout = QGridLayout()
view_layout.addWidget(QLabel('Objects: '), 0, 0)
self.object_view = QListView()
self.object_model = RunModel(self.rc.all_objects, mode='object')
self.object_view.setModel(self.object_model)
self.object_view.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
view_layout.addWidget(self.object_view, 1, 0)
view_layout.addWidget(QLabel('Functions: '), 0, 1)
self.func_view = QListView()
self.func_model = RunModel(self.rc.current_all_funcs, mode='func')
self.func_view.setModel(self.func_model)
self.func_view.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
view_layout.addWidget(self.func_view, 1, 1)
view_layout.addWidget(QLabel('Errors: '), 0, 2)
self.error_widget = SimpleList(list())
self.error_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
# Connect Signal from error_widget to function to enable inspecting the errors
self.error_widget.currentChanged.connect(self.show_error)
view_layout.addWidget(self.error_widget, 1, 2)
layout.addLayout(view_layout)
self.console_widget = MainConsoleWidget()
layout.addWidget(self.console_widget)
self.pgbar = QProgressBar()
self.pgbar.setValue(0)
self.pgbar.setMaximum(len(self.rc.all_steps))
layout.addWidget(self.pgbar)
bt_layout = QHBoxLayout()
self.continue_bt = QPushButton('Continue')
self.continue_bt.setFont(QFont('AnyStyle', 14))
self.continue_bt.setIcon(get_std_icon('SP_MediaPlay'))
self.continue_bt.clicked.connect(self.start)
bt_layout.addWidget(self.continue_bt)
self.pause_bt = QPushButton('Pause')
self.pause_bt.setFont(QFont('AnyStyle', 14))
self.pause_bt.setIcon(get_std_icon('SP_MediaPause'))
self.pause_bt.clicked.connect(self.pause_funcs)
bt_layout.addWidget(self.pause_bt)
self.restart_bt = QPushButton('Restart')
self.restart_bt.setFont(QFont('AnyStyle', 14))
self.restart_bt.setIcon(get_std_icon('SP_BrowserReload'))
self.restart_bt.clicked.connect(self.restart)
bt_layout.addWidget(self.restart_bt)
if QS().value('use_qthread'):
self.reload_chbx = None
else:
self.reload_chbx = QCheckBox('Reload Modules')
bt_layout.addWidget(self.reload_chbx)
self.autoscroll_bt = QPushButton('Auto-Scroll')
self.autoscroll_bt.setCheckable(True)
self.autoscroll_bt.setChecked(True)
self.autoscroll_bt.setIcon(get_std_icon('SP_DialogOkButton'))
self.autoscroll_bt.clicked.connect(self.toggle_autoscroll)
bt_layout.addWidget(self.autoscroll_bt)
self.close_bt = QPushButton('Close')
self.close_bt.setFont(QFont('AnyStyle', 14))
self.close_bt.setIcon(get_std_icon('SP_MediaStop'))
self.close_bt.clicked.connect(self.close)
bt_layout.addWidget(self.close_bt)
layout.addLayout(bt_layout)
self.setLayout(layout)
def start(self):
# Set paused to false
self.rc.paused = False
# Enable/Disable Buttons
self.continue_bt.setEnabled(False)
self.pause_bt.setEnabled(True)
self.restart_bt.setEnabled(False)
self.close_bt.setEnabled(False)
self.rc.start()
def pause_funcs(self):
self.rc.paused = True
self.console_widget.write_html('<br><b>Finishing last function...</b><br>')
def restart(self):
# Reinitialize controller
self.init_controller()
if self.reload_chbx and self.reload_chbx.isChecked():
self.mw.init_mp_pool()
# Clear Console-Widget
self.console_widget.clear()
# Redo References to display-widgets
self.object_model._data = self.rc.all_objects
self.object_model.layoutChanged.emit()
self.func_model._data = self.rc.current_all_funcs
self.func_model.layoutChanged.emit()
self.error_widget.replace_data(list(self.rc.errors.keys()))
# Reset Progress-Bar
self.pgbar.setValue(0)
# Restart
self.start()
def toggle_autoscroll(self, state):
if state:
self.console_widget.set_autoscroll(True)
else:
self.console_widget.set_autoscroll(False)
def show_error(self, current, _):
self.console_widget.set_autoscroll(False)
self.autoscroll_bt.setChecked(False)
self.console_widget.scrollToAnchor(str(self.rc.errors[current][1]))
def closeEvent(self, event):
self.mw.pipeline_running = False
event.accept()
class EditGuiArgsDlg(QDialog):
def __init__(self, cf_dialog):
super().__init__(cf_dialog)
self.cf = cf_dialog
self.gui_args = dict()
self.default_gui_args = dict()
if self.cf.current_parameter:
covered_params = ['data', 'param_name', 'param_alias', 'default', 'param_unit', 'description']
# Get possible default GUI-Args additional to those covered by the Main-GUI
gui_type = self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_type']
if pd.notna(gui_type):
gui_handle = getattr(parameter_widgets, gui_type)
psig = inspect.signature(gui_handle).parameters
self.default_gui_args = {p: psig[p].default for p in psig if p not in covered_params}
# Get current GUI-Args
loaded_gui_args = self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_args']
if pd.notna(loaded_gui_args):
self.gui_args = literal_eval(loaded_gui_args)
else:
self.gui_args = dict()
# Fill in all possible Options, which are not already changed
for arg_key in [ak for ak in self.default_gui_args if ak not in self.gui_args]:
self.gui_args[arg_key] = self.default_gui_args[arg_key]
if len(self.gui_args) > 0:
self.init_ui()
self.open()
def init_ui(self):
layout = QVBoxLayout()
layout.addWidget(EditDict(data=self.gui_args, ui_buttons=False))
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt)
self.setLayout(layout)
def closeEvent(self, event):
# Remove all options which don't differ from the default
for arg_key in [ak for ak in self.gui_args if self.gui_args[ak] == self.default_gui_args[ak]]:
self.gui_args.pop(arg_key)
if len(self.gui_args) > 0:
self.cf.pguiargs_changed(self.gui_args)
event.accept()
class ChooseOptions(QDialog):
def __init__(self, cf_dialog, gui_type, options):
super().__init__(cf_dialog)
self.cf = cf_dialog
self.gui_type = gui_type
self.options = options
self.init_ui()
# If open(), execution doesn't stop after the dialog
self.exec()
def init_ui(self):
layout = QVBoxLayout()
layout.addWidget(QLabel(f'For {self.gui_type}, you need to specify the options to choose from'))
layout.addWidget(EditList(data=self.options))
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt)
self.setLayout(layout)
# ToDo:
# Bug1: After saving a new function, the parameters stay in the table-view,
# Bug2: When editing existing functions, the proprietary parameters can not be edited (they land in exising_params)
# Bug3: When hitting Enter, the focus still lies on the AddFunc/EditFunc-Buttons which can disrupt setup
class CustomFunctionImport(QDialog):
def __init__(self, main_win):
super().__init__(main_win)
self.mw = main_win
self.ct = main_win.ct
self.file_path = None
self.pkg_name = None
self.current_function = None
self.current_parameter = None
self.oblig_func = ['target', 'tab', 'group', 'matplotlib', 'mayavi']
self.oblig_params = ['default', 'gui_type']
self.exst_functions = list(self.ct.pd_funcs.index)
self.exst_parameters = ['mw', 'pr', 'meeg', 'fsmri', 'group']
self.exst_parameters += list(self.ct.settings.keys())
self.exst_parameters += list(QS().childKeys())
self.exst_parameters += list(self.ct.pr.parameters[self.ct.pr.p_preset].keys())
self.param_exst_dict = dict()
self.code_editor = None
self.code_dict = dict()
# Get available parameter-guis
self.available_param_guis = [pg for pg in dir(parameter_widgets) if 'Gui' in pg and pg != 'QtGui']
self.add_pd_funcs = pd.DataFrame(columns=['alias', 'target', 'tab', 'group', 'matplotlib',
'mayavi', 'dependencies', 'module', 'func_args', 'ready'])
self.add_pd_params = pd.DataFrame(columns=['alias', 'group', 'default', 'unit', 'description', 'gui_type',
'gui_args', 'functions', 'ready'])
self.yes_icon = get_std_icon('SP_DialogApplyButton')
self.no_icon = get_std_icon('SP_DialogCancelButton')
self.setWindowTitle('Custom-Functions-Setup')
self.init_ui()
self.open()
def init_ui(self):
layout = QVBoxLayout()
# Import Button and Combobox
add_bt_layout = QHBoxLayout()
addfn_bt = QPushButton('Load Function/s')
addfn_bt.setFont(QFont(QS().value('app_font'), 12))
addfn_bt.clicked.connect(self.get_functions)
add_bt_layout.addWidget(addfn_bt)
editfn_bt = QPushButton('Edit Function/s')
editfn_bt.setFont(QFont(QS().value('app_font'), 12))
editfn_bt.clicked.connect(self.edit_functions)
add_bt_layout.addWidget(editfn_bt)
layout.addLayout(add_bt_layout)
# Function-ComboBox
func_cmbx_layout = QHBoxLayout()
self.func_cmbx = QComboBox()
self.func_cmbx.currentTextChanged.connect(self.func_item_selected)
func_cmbx_layout.addWidget(self.func_cmbx)
self.func_chkl = QLabel()
self.func_chkl.setPixmap(self.no_icon.pixmap(16, 16))
self.func_chkl.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
func_cmbx_layout.addWidget(self.func_chkl)
layout.addLayout(func_cmbx_layout)
# Hint for obligatory items
# There may be a better way to center the labels instead of with the space-labels
obl_hint_layout = QHBoxLayout()
space_label1 = QLabel('')
obl_hint_layout.addWidget(space_label1)
obl_hint_label1 = QLabel()
obl_hint_label1.setPixmap(self.no_icon.pixmap(16, 16))
obl_hint_label1.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label1)
obl_hint_label2 = QLabel()
obl_hint_label2.setPixmap(get_std_icon('SP_ArrowForward').pixmap(16, 16))
obl_hint_label2.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label2)
obl_hint_label3 = QLabel()
obl_hint_label3.setPixmap(self.yes_icon.pixmap(16, 16))
obl_hint_label3.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label3)
obl_hint_label4 = QLabel('(= The items marked are obligatory)')
obl_hint_label4.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label4)
space_label2 = QLabel('')
obl_hint_layout.addWidget(space_label2)
layout.addLayout(obl_hint_layout)
setup_layout = QHBoxLayout()
# The Function-Setup-Groupbox
func_setup_gbox = QGroupBox('Function-Setup')
func_setup_gbox.setAlignment(Qt.AlignHCenter)
func_setup_formlayout = QFormLayout()
self.falias_le = QLineEdit()
self.falias_le.setToolTip('Set a name if you want something other than the functions-name')
self.falias_le.textEdited.connect(self.falias_changed)
func_setup_formlayout.addRow('Alias', self.falias_le)
target_layout = QHBoxLayout()
self.target_cmbx = QComboBox()
self.target_cmbx.setToolTip('Set the target on which the function shall operate')
self.target_cmbx.setEditable(False)
self.target_cmbx.activated.connect(self.target_cmbx_changed)
target_layout.addWidget(self.target_cmbx)
self.target_chkl = QLabel()
target_layout.addWidget(self.target_chkl)
func_setup_formlayout.addRow('Target', target_layout)
tab_layout = QHBoxLayout()
self.tab_cmbx = QComboBox()
self.tab_cmbx.setToolTip('Choose the Tab for the function (Compute/Plot/...)')
self.tab_cmbx.setEditable(True)
self.tab_cmbx.activated.connect(self.tab_cmbx_changed)
self.tab_cmbx.editTextChanged.connect(self.tab_cmbx_edited)
tab_layout.addWidget(self.tab_cmbx)
self.tab_chkl = QLabel()
tab_layout.addWidget(self.tab_chkl)
func_setup_formlayout.addRow('Tab', tab_layout)
group_layout = QHBoxLayout()
self.group_cmbx = QComboBox()
self.group_cmbx.setToolTip('Choose the function-group for the function or create a new one')
self.group_cmbx.setEditable(True)
self.group_cmbx.activated.connect(self.group_cmbx_changed)
self.group_cmbx.editTextChanged.connect(self.group_cmbx_edited)
group_layout.addWidget(self.group_cmbx)
self.group_chkl = QLabel()
group_layout.addWidget(self.group_chkl)
func_setup_formlayout.addRow('Group', group_layout)
mtpl_layout = QHBoxLayout()
self.mtpl_bts = QButtonGroup(self)
self.mtpl_yesbt = QPushButton('Yes')
self.mtpl_yesbt.setCheckable(True)
self.mtpl_nobt = QPushButton('No')
self.mtpl_nobt.setCheckable(True)
self.mtpl_void = QPushButton('')
self.mtpl_void.setCheckable(True)
self.mtpl_bts.addButton(self.mtpl_yesbt)
self.mtpl_bts.addButton(self.mtpl_nobt)
self.mtpl_bts.addButton(self.mtpl_void)
mtpl_layout.addWidget(self.mtpl_yesbt)
mtpl_layout.addWidget(self.mtpl_nobt)
self.mtpl_yesbt.setToolTip('Choose, if the function contains an interactive Matplotlib-Plot')
self.mtpl_nobt.setToolTip('Choose, if the function contains no interactive Matplotlib-Plot')
self.mtpl_bts.buttonToggled.connect(self.mtpl_changed)
self.mtpl_chkl = QLabel()
mtpl_layout.addWidget(self.mtpl_chkl)
func_setup_formlayout.addRow('Matplotlib?', mtpl_layout)
myv_layout = QHBoxLayout()
self.myv_bts = QButtonGroup(self)
self.myv_yesbt = QPushButton('Yes')
self.myv_yesbt.setCheckable(True)
self.myv_nobt = QPushButton('No')
self.myv_nobt.setCheckable(True)
self.myv_void = QPushButton('')
self.myv_void.setCheckable(True)
self.myv_bts.addButton(self.myv_yesbt)
self.myv_bts.addButton(self.myv_nobt)
self.myv_bts.addButton(self.myv_void)
myv_layout.addWidget(self.myv_yesbt)
myv_layout.addWidget(self.myv_nobt)
self.myv_yesbt.setToolTip('Choose, if the function contains a Pyvista/Mayavi-Plot')
self.myv_nobt.setToolTip('Choose, if the function contains a Pyvista/Mayavi-Plot')
self.myv_bts.buttonToggled.connect(self.myv_changed)
self.myv_chkl = QLabel()
myv_layout.addWidget(self.myv_chkl)
func_setup_formlayout.addRow('Pyvista/Mayavi?', myv_layout)
self.dpd_bt = QPushButton('Set Dependencies')
self.dpd_bt.setToolTip('Set the functions that must be activated before or the files that must be present '
'for this function to work')
self.dpd_bt.clicked.connect(partial(SelectDependencies, self))
func_setup_formlayout.addRow('Dependencies', self.dpd_bt)
func_setup_gbox.setLayout(func_setup_formlayout)
setup_layout.addWidget(func_setup_gbox)
# The Parameter-Setup-Group-Box
self.param_setup_gbox = QGroupBox('Parameter-Setup')
self.param_setup_gbox.setAlignment(Qt.AlignHCenter)
param_setup_layout = QVBoxLayout()
self.exstparam_l = QLabel()
self.exstparam_l.setWordWrap(True)
self.exstparam_l.hide()
param_setup_layout.addWidget(self.exstparam_l)
self.param_view = QListView()
self.param_model = CustomFunctionModel(self.add_pd_params)
self.param_view.setModel(self.param_model)
self.param_view.selectionModel().currentChanged.connect(self.param_item_selected)
param_setup_layout.addWidget(self.param_view)
param_setup_formlayout = QFormLayout()
self.palias_le = QLineEdit()
self.palias_le.setToolTip('Set a name if you want something other than the parameters-name')
self.palias_le.textEdited.connect(self.palias_changed)
param_setup_formlayout.addRow('Alias', self.palias_le)
default_layout = QHBoxLayout()
self.default_le = QLineEdit()
self.default_le.setToolTip('Set the default for the parameter (it has to fit the gui-type!)')
self.default_le.textEdited.connect(self.pdefault_changed)
default_layout.addWidget(self.default_le)
self.default_chkl = QLabel()
default_layout.addWidget(self.default_chkl)
param_setup_formlayout.addRow('Default', default_layout)
self.unit_le = QLineEdit()
self.unit_le.setToolTip('Set the unit for the parameter (optional)')
self.unit_le.textEdited.connect(self.punit_changed)
param_setup_formlayout.addRow('Unit', self.unit_le)
self.description_le = QLineEdit()
self.description_le.setToolTip('Short description of the parameter (optional)')
self.description_le.textEdited.connect(self.pdescription_changed)
param_setup_formlayout.addRow('Description', self.description_le)
guitype_layout = QHBoxLayout()
self.guitype_cmbx = QComboBox()
self.guitype_cmbx.setToolTip('Choose the GUI from the available GUIs')
self.guitype_cmbx.activated.connect(self.guitype_cmbx_changed)
guitype_layout.addWidget(self.guitype_cmbx)
test_bt = QPushButton('Test')
test_bt.clicked.connect(self.show_param_gui)
guitype_layout.addWidget(test_bt)
self.guitype_chkl = QLabel()
guitype_layout.addWidget(self.guitype_chkl)
param_setup_formlayout.addRow('GUI-Type', guitype_layout)
self.guiargs_bt = QPushButton('Edit')
self.guiargs_bt.clicked.connect(partial(EditGuiArgsDlg, self))
self.guiargs_bt.setToolTip('Set Arguments for the GUI in a dict (optional)')
param_setup_formlayout.addRow('Additional Options', self.guiargs_bt)
param_setup_layout.addLayout(param_setup_formlayout)
self.param_setup_gbox.setLayout(param_setup_layout)
setup_layout.addWidget(self.param_setup_gbox)
layout.addLayout(setup_layout)
bt_layout = QHBoxLayout()
save_bt = QPushButton('Save')
save_bt.setFont(QFont(QS().value('app_font'), 16))
save_bt.clicked.connect(self.save_pkg)
bt_layout.addWidget(save_bt)
src_bt = QPushButton('Show Code')
src_bt.setFont(QFont(QS().value('app_font'), 16))
src_bt.clicked.connect(self.show_code)
bt_layout.addWidget(src_bt)
close_bt = QPushButton('Quit')
close_bt.setFont(QFont(QS().value('app_font'), 16))
close_bt.clicked.connect(self.close)
bt_layout.addWidget(close_bt)
layout.addLayout(bt_layout)
self.setLayout(layout)
self.populate_target_cmbx()
self.populate_tab_cmbx()
self.populate_group_cmbx()
self.populate_guitype_cmbx()
def update_func_cmbx(self):
self.func_cmbx.clear()
self.func_cmbx.insertItems(0, self.add_pd_funcs.index)
try:
current_index = list(self.add_pd_funcs.index).index(self.current_function)
except ValueError:
current_index = 0
self.func_cmbx.setCurrentIndex(current_index)
def clear_func_items(self):
self.falias_le.clear()
self.target_cmbx.setCurrentIndex(-1)
self.target_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.tab_cmbx.setCurrentIndex(-1)
self.tab_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.group_cmbx.setCurrentIndex(-1)
self.group_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.mtpl_yesbt.setChecked(False)
self.mtpl_nobt.setChecked(False)
self.mtpl_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.myv_nobt.setChecked(False)
self.myv_nobt.setChecked(False)
self.myv_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
def clear_param_items(self):
self.update_param_view()
self.palias_le.clear()
self.default_le.clear()
self.default_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.unit_le.clear()
self.guitype_cmbx.setCurrentIndex(-1)
self.guitype_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.param_setup_gbox.setEnabled(False)
def func_item_selected(self, text):
if text:
self.current_function = text
self.update_code_editor()
self.update_func_setup()
if any([self.current_function in str(x) for x in self.add_pd_params['functions']]):
self.param_setup_gbox.setEnabled(True)
self.update_param_view()
self.current_parameter = \
self.add_pd_params.loc[
[self.current_function in str(x) for x in self.add_pd_params['functions']]].index[0]
self.update_exst_param_label()
self.update_param_setup()
else:
self.update_exst_param_label()
# Clear existing entries
self.clear_param_items()
def param_item_selected(self, current):
self.current_parameter = self.param_model.getData(current)
self.update_param_setup()
self.update_code_editor()
def update_func_setup(self):
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'alias']):
self.falias_le.setText(self.add_pd_funcs.loc[self.current_function, 'alias'])
else:
self.falias_le.clear()
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'target']):
self.target_cmbx.setCurrentText(self.add_pd_funcs.loc[self.current_function, 'target'])
self.target_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.target_cmbx.setCurrentIndex(-1)
self.target_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'tab']):
self.tab_cmbx.setCurrentText(self.add_pd_funcs.loc[self.current_function, 'tab'])
self.tab_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.tab_cmbx.setCurrentIndex(-1)
self.tab_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'group']):
self.group_cmbx.setCurrentText(self.add_pd_funcs.loc[self.current_function, 'group'])
self.group_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.group_cmbx.setCurrentIndex(-1)
self.group_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'matplotlib']):
if self.add_pd_funcs.loc[self.current_function, 'matplotlib']:
self.mtpl_yesbt.setChecked(True)
else:
self.mtpl_nobt.setChecked(True)
self.mtpl_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.mtpl_void.setChecked(True)
self.mtpl_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'mayavi']):
if self.add_pd_funcs.loc[self.current_function, 'mayavi']:
self.myv_yesbt.setChecked(True)
else:
self.myv_nobt.setChecked(True)
self.myv_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.myv_void.setChecked(True)
self.myv_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
def update_exst_param_label(self):
if self.current_function:
if len(self.param_exst_dict[self.current_function]) > 0:
self.exstparam_l.setText(f'Already existing Parameters: {self.param_exst_dict[self.current_function]}')
self.exstparam_l.show()
else:
self.exstparam_l.hide()
def update_param_setup(self):
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'alias']):
self.palias_le.setText(self.add_pd_params.loc[self.current_parameter, 'alias'])
else:
self.palias_le.clear()
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'default']):
self.default_le.setText(self.add_pd_params.loc[self.current_parameter, 'default'])
self.default_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.default_le.clear()
self.default_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'unit']):
self.unit_le.setText(self.add_pd_params.loc[self.current_parameter, 'unit'])
else:
self.unit_le.clear()
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'description']):
self.description_le.setText(self.add_pd_params.loc[self.current_parameter, 'description'])
else:
self.description_le.clear()
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'gui_type']):
self.guitype_cmbx.setCurrentText(self.add_pd_params.loc[self.current_parameter, 'gui_type'])
self.guitype_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.guitype_cmbx.setCurrentIndex(-1)
self.guitype_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
def check_func_setup(self):
# Check, that all obligatory items of the Subject-Setup and the Parameter-Setup are set
if all([pd.notna(self.add_pd_funcs.loc[self.current_function, i]) for i in self.oblig_func]):
function_params = self.add_pd_params.loc[
[self.current_function in str(x) for x in self.add_pd_params['functions']]]
if pd.notna(self.add_pd_params.loc[function_params.index, self.oblig_params]).all().all():
self.func_chkl.setPixmap(self.yes_icon.pixmap(16, 16))
self.add_pd_funcs.loc[self.current_function, 'ready'] = 1
else:
self.func_chkl.setPixmap(self.no_icon.pixmap(16, 16))
self.add_pd_funcs.loc[self.current_function, 'ready'] = 0
def update_param_view(self):
# Update Param-Model with new pd_params of current_function
current_pd_params = self.add_pd_params.loc[
[self.current_function in str(x) for x in self.add_pd_params['functions']]
]
self.param_model.updateData(current_pd_params)
def check_param_setup(self):
# Check, that all obligatory items of the Parameter-Setup are set
if all([pd.notna(self.add_pd_params.loc[self.current_parameter, i]) for i in self.oblig_params]):
self.add_pd_params.loc[self.current_parameter, 'ready'] = 1
else:
self.add_pd_params.loc[self.current_parameter, 'ready'] = 0
self.update_param_view()
# Line-Edit Change-Signals
def falias_changed(self, text):
if self.current_function:
self.add_pd_funcs.loc[self.current_function, 'alias'] = text
def mtpl_changed(self, current_button, state):
if self.current_function:
if state and current_button == self.mtpl_yesbt:
self.add_pd_funcs.loc[self.current_function, 'matplotlib'] = True
elif state and current_button == self.mtpl_nobt:
self.add_pd_funcs.loc[self.current_function, 'matplotlib'] = False
if current_button != self.mtpl_void:
self.mtpl_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_func_setup()
def myv_changed(self, current_button, state):
if self.current_function:
if state and current_button == self.myv_yesbt:
self.add_pd_funcs.loc[self.current_function, 'mayavi'] = True
elif state and current_button == self.myv_nobt:
self.add_pd_funcs.loc[self.current_function, 'mayavi'] = False
if current_button != self.myv_void:
self.myv_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_func_setup()
def palias_changed(self, text):
if self.current_parameter:
self.add_pd_params.loc[self.current_parameter, 'alias'] = text
def pdefault_changed(self, text):
if self.current_parameter:
self.add_pd_params.loc[self.current_parameter, 'default'] = text
self.default_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_param_setup()
self.check_func_setup()
def punit_changed(self, text):
if self.current_parameter:
self.add_pd_params.loc[self.current_parameter, 'unit'] = text
def pdescription_changed(self, text):
if self.current_parameter:
self.add_pd_params.loc[self.current_parameter, 'description'] = text
def populate_target_cmbx(self):
self.target_cmbx.insertItems(0, ['MEEG', 'FSMRI', 'Group', 'Other'])
def populate_tab_cmbx(self):
self.tab_cmbx.clear()
self.tab_cmbx.insertItems(0, set(self.ct.pd_funcs['tab']) |
set(self.add_pd_funcs.loc[pd.notna(self.add_pd_funcs['tab']), 'tab']))
def populate_group_cmbx(self):
self.group_cmbx.clear()
self.group_cmbx.insertItems(0, set(self.ct.pd_funcs['group']) |
set(self.add_pd_funcs.loc[pd.notna(self.add_pd_funcs['group']), 'group']))
def populate_guitype_cmbx(self):
self.guitype_cmbx.insertItems(0, self.available_param_guis)
def target_cmbx_changed(self, idx):
if self.current_function:
self.add_pd_funcs.loc[self.current_function, 'target'] = self.target_cmbx.itemText(idx)
self.target_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_func_setup()
def tab_cmbx_changed(self, idx):
# Insert changes from other functions if edited
self.populate_tab_cmbx()
self.tab_cmbx.setCurrentIndex(idx)
if self.current_function:
self.add_pd_funcs.loc[self.current_function, 'tab'] = self.tab_cmbx.itemText(idx)
self.tab_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_func_setup()
def tab_cmbx_edited(self, text):
if self.current_function and text != '':
self.add_pd_funcs.loc[self.current_function, 'tab'] = text
self.tab_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_func_setup()
def group_cmbx_changed(self, idx):
# Insert changes from other functions if edited
self.populate_group_cmbx()
self.group_cmbx.setCurrentIndex(idx)
group_name = self.group_cmbx.itemText(idx)
if self.current_function:
self.add_pd_funcs.loc[self.current_function, 'group'] = group_name
for param in self.add_pd_params.index:
self.add_pd_params.loc[param, 'group'] = group_name
self.group_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_func_setup()
def group_cmbx_edited(self, text):
if self.current_function and text != '':
self.add_pd_funcs.loc[self.current_function, 'group'] = text
for param in self.add_pd_params.index:
self.add_pd_params.loc[param, 'group'] = text
self.group_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_func_setup()
def guitype_cmbx_changed(self, idx):
text = self.guitype_cmbx.itemText(idx)
gui_args = dict()
options = list()
if self.current_parameter:
# If ComboGui or CheckListGui, options have to be set:
if text in ['ComboGui', 'CheckListGui']:
# Check if options already in gui_args
loaded_gui_args = self.add_pd_params.loc[self.current_parameter, 'gui_args']
if pd.notna(loaded_gui_args):
gui_args = literal_eval(loaded_gui_args)
if 'options' in gui_args:
options = gui_args['options']
ChooseOptions(self, text, options)
# Save the gui_args in add_pd_params
gui_args['options'] = options
self.add_pd_params.loc[self.current_parameter, 'gui_args'] = str(gui_args)
# Check, if default_value and gui_type match
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'default']):
result, _ = self.test_param_gui(
default_string=self.add_pd_params.loc[self.current_parameter, 'default'],
gui_type=text, gui_args=gui_args)
else:
result = None
if not result:
self.add_pd_params.loc[self.current_parameter, 'gui_type'] = text
self.guitype_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.check_param_setup()
self.check_func_setup()
else:
self.guitype_cmbx.setCurrentIndex(-1)
self.add_pd_params.loc[self.current_parameter, 'gui_type'] = None
self.check_param_setup()
self.check_func_setup()
def pguiargs_changed(self, gui_args):
if self.current_parameter:
# Check, if default_value and gui_type match
if pd.notna(self.add_pd_params.loc[self.current_parameter, ['default', 'gui_type']]).all():
result, _ = self.test_param_gui(
default_string=self.add_pd_params.loc[self.current_parameter, 'default'],
gui_type=self.add_pd_params.loc[self.current_parameter, 'gui_type'],
gui_args=gui_args)
else:
result = None
if not result:
self.add_pd_params.loc[self.current_parameter, 'gui_args'] = str(gui_args)
else:
self.add_pd_params.loc[self.current_parameter, 'gui_args'] = None
def get_functions(self):
# Clear Function- and Parameter-DataFrame
self.add_pd_funcs.drop(index=self.add_pd_funcs.index, inplace=True)
self.add_pd_params.drop(index=self.add_pd_funcs.index, inplace=True)
self.clear_func_items()
self.clear_param_items()
# Returns tuple of files-list and file-type
cf_path_string = QFileDialog.getOpenFileName(self,
'Choose the Python-File containing your function to import',
filter='Python-File (*.py)')[0]
if cf_path_string:
self.file_path = Path(cf_path_string)
ImportFuncs(self)
def edit_functions(self):
# Clear Function- and Parameter-DataFrame
self.add_pd_funcs.drop(index=self.add_pd_funcs.index, inplace=True)
self.add_pd_params.drop(index=self.add_pd_funcs.index, inplace=True)
self.clear_func_items()
self.clear_param_items()
# Returns tuple of files-list and file-type
cf_path_string = QFileDialog.getOpenFileName(self,
'Choose the Python-File containing the functions to edit',
filter='Python-File (*.py)', directory=self.ct.custom_pkg_path)[0]
if cf_path_string:
self.file_path = Path(cf_path_string)
ImportFuncs(self, edit_existing=True)
def test_param_gui(self, default_string, gui_type, gui_args=None):
# Test ParamGui with Value
if gui_args is None:
gui_args = {}
test_parameters = dict()
try:
test_parameters[self.current_parameter] = literal_eval(default_string)
except (ValueError, SyntaxError):
# Allow parameters to be defined by functions by numpy, etc.
if self.add_pd_params.loc[self.current_parameter, 'gui_type'] == 'FuncGui':
test_parameters[self.current_parameter] = eval(default_string)
else:
test_parameters[self.current_parameter] = default_string
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'alias']):
param_alias = self.add_pd_params.loc[self.current_parameter, 'alias']
else:
param_alias = self.current_parameter
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'description']):
description = self.add_pd_params.loc[self.current_parameter, 'description']
else:
description = None
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'unit']):
param_unit = self.add_pd_params.loc[self.current_parameter, 'unit']
else:
param_unit = None
gui_handle = getattr(parameter_widgets, gui_type)
handle_params = inspect.signature(gui_handle).parameters
try:
if 'param_unit' in handle_params:
gui = gui_handle(data=test_parameters, param_name=self.current_parameter,
param_alias=param_alias, description=description, param_unit=param_unit, **gui_args)
else:
gui = gui_handle(data=test_parameters, param_name=self.current_parameter,
param_alias=param_alias, description=description, **gui_args)
except Exception as e:
gui = None
result = e
QMessageBox.warning(self, 'Error in ParamGui',
f'The execution of {gui_type} with {default_string} as default '
f'and {gui_args} as additional parameters raises the following error:\n'
f'{result}')
else:
result = None
return result, gui
def show_param_gui(self):
if self.current_parameter and pd.notna(self.add_pd_params.loc[self.current_parameter, 'gui_type']):
TestParamGui(self)
def update_code_editor(self):
if self.code_editor:
self.code_editor.clear()
self.code_editor.insertPlainText(self.code_dict[self.current_function])
def show_code(self):
self.code_editor = CodeEditor(self)
self.code_editor.setReadOnly(True)
self.update_code_editor()
code_dialog = SimpleDialog(self.code_editor, parent=self, modal=False, window_title='Source-Code')
set_ratio_geometry(0.5, code_dialog)
center(code_dialog)
def save_pkg(self):
if any(self.add_pd_funcs['ready'] == 1):
SavePkgDialog(self)
def closeEvent(self, event):
drop_funcs = [f for f in self.add_pd_funcs.index if not self.add_pd_funcs.loc[f, 'ready']]
if len(drop_funcs) > 0:
answer = QMessageBox.question(self, 'Close Custom-Functions?', f'There are still unfinished functions:\n'
f'{drop_funcs}\n'
f'Do you still want to quit?')
else:
answer = None
if answer == QMessageBox.Yes or answer is None:
event.accept()
else:
event.ignore()
class ImportFuncs(QDialog):
def __init__(self, cf_dialog, edit_existing=False):
super().__init__(cf_dialog)
self.cf = cf_dialog
self.edit_existing = edit_existing
self.module = None
self.loaded_cfs = []
self.edit_loaded_cfs = []
self.selected_cfs = []
self.selected_edit_cfs = []
self.already_existing_funcs = []
self.load_function_list()
self.init_ui()
self.open()
def load_function_list(self):
# Load .csv-Files if
try:
if self.edit_existing:
self.cf.pkg_name = self.cf.file_path.parent.name
pd_funcs_path = join(self.cf.file_path.parent, f'{self.cf.pkg_name}_functions.csv')
pd_params_path = join(self.cf.file_path.parent, f'{self.cf.pkg_name}_parameters.csv')
self.cf.add_pd_funcs = pd.read_csv(pd_funcs_path, sep=';', index_col=0)
self.cf.add_pd_params = pd.read_csv(pd_params_path, sep=';', index_col=0)
# Can be removed soon, when nobody uses old packages anymore (10.11.2020)
if 'target' not in self.cf.add_pd_funcs.columns:
self.cf.add_pd_funcs['target'] = None
else:
self.cf.pkg_name = None
spec = util.spec_from_file_location(self.cf.file_path.stem, self.cf.file_path)
self.module = util.module_from_spec(spec)
spec.loader.exec_module(self.module)
except:
err = get_exception_tuple()
ErrorDialog(err, self)
else:
for func_key in self.module.__dict__:
func = self.module.__dict__[func_key]
# Only functions are allowed (Classes should be called from function)
if type(func) == FunctionType and func.__module__ == self.module.__name__:
# Check, if function is already existing
if func_key in self.cf.exst_functions:
if self.edit_existing and func_key in self.cf.add_pd_funcs.index:
self.edit_loaded_cfs.append(func_key)
else:
self.already_existing_funcs.append(func_key)
else:
self.loaded_cfs.append(func_key)
def init_ui(self):
layout = QVBoxLayout()
if len(self.already_existing_funcs) > 0:
exst_label = QLabel(f'These functions already exist: {self.already_existing_funcs}')
exst_label.setWordWrap(True)
layout.addWidget(exst_label)
view_layout = QHBoxLayout()
load_list = CheckList(self.loaded_cfs, self.selected_cfs, ui_button_pos='bottom', title='New functions')
view_layout.addWidget(load_list)
if len(self.edit_loaded_cfs) > 0:
edit_list = CheckList(self.edit_loaded_cfs, self.selected_edit_cfs, ui_button_pos='bottom',
title='Functions to edit')
view_layout.addWidget(edit_list)
layout.addLayout(view_layout)
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt)
self.setWindowTitle('Choose Functions')
self.setLayout(layout)
def load_selected_functions(self):
selected_funcs = [cf for cf in self.loaded_cfs if cf in self.selected_cfs] + \
[cf for cf in self.edit_loaded_cfs if cf in self.selected_edit_cfs]
if self.edit_existing:
# Drop Functions which are not selected
self.cf.add_pd_funcs.drop(index=[f for f in self.cf.add_pd_funcs.index if f not in selected_funcs],
inplace=True)
for func_key in selected_funcs:
func = self.module.__dict__[func_key]
self.cf.add_pd_funcs.loc[func_key, 'module'] = self.module.__name__
self.cf.add_pd_funcs.loc[func_key, 'ready'] = 0
self.cf.code_dict[func_key] = inspect.getsource(func)
# Get Parameters and divide them in existing and setup
all_parameters = list(inspect.signature(func).parameters)
self.cf.add_pd_funcs.loc[func_key, 'func_args'] = ','.join(all_parameters)
existing_parameters = []
for param_key in all_parameters:
if param_key in self.cf.exst_parameters:
existing_parameters.append(param_key)
else:
# Check if ready (possible when editing functions)
self.cf.add_pd_params.loc[param_key, 'ready'] = 0
if pd.notna(self.cf.add_pd_params.loc[param_key, self.cf.oblig_params]).all():
self.cf.add_pd_params.loc[param_key, 'ready'] = 1
# functions (which are using param) is a continuous string
# (because pandas can't store a list as item)
if param_key in self.cf.add_pd_params.index:
if 'functions' in self.cf.add_pd_params.columns:
if pd.notna(self.cf.add_pd_params.loc[param_key, 'functions']):
self.cf.add_pd_params.loc[param_key, 'functions'] += func_key
else:
self.cf.add_pd_params.loc[param_key, 'functions'] = func_key
else:
self.cf.add_pd_params.loc[param_key, 'functions'] = func_key
else:
self.cf.add_pd_params.loc[param_key, 'functions'] = func_key
self.cf.param_exst_dict[func_key] = existing_parameters
# Check, if mandatory columns exist
if 'ready' not in self.cf.add_pd_params.columns:
self.cf.add_pd_params['ready'] = 0
if 'functions' not in self.cf.add_pd_params.columns:
self.cf.add_pd_params['functions'] = ''
def closeEvent(self, event):
self.load_selected_functions()
self.cf.update_func_cmbx()
self.cf.update_exst_param_label()
if self.cf.code_editor:
self.cf.code_editor.update_code()
event.accept()
class SelectDependencies(QDialog):
def __init__(self, cf_dialog):
super().__init__(cf_dialog)
self.cf_dialog = cf_dialog
if pd.notna(cf_dialog.add_pd_funcs.loc[cf_dialog.current_function, 'dependencies']):
self.dpd_list = literal_eval(cf_dialog.add_pd_funcs.loc[cf_dialog.current_function, 'dependencies'])
else:
self.dpd_list = []
layout = QVBoxLayout()
self.listw = QListWidget()
self.listw.itemChanged.connect(self.item_checked)
layout.addWidget(self.listw)
ok_bt = QPushButton('OK')
ok_bt.clicked.connect(self.close_dlg)
layout.addWidget(ok_bt)
self.populate_listw()
self.setLayout(layout)
self.open()
def populate_listw(self):
for function in self.cf_dialog.ct.pd_funcs.index:
item = QListWidgetItem(function)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
if function in self.dpd_list:
item.setCheckState(Qt.Checked)
else:
item.setCheckState(Qt.Unchecked)
self.listw.addItem(item)
def item_checked(self, item):
if item.checkState == Qt.Checked:
self.dpd_list.append(item.text())
elif item.text() in self.dpd_list:
self.dpd_list.remove(item.text())
def close_dlg(self):
self.cf_dialog.add_pd_funcs.loc[self.cf_dialog.current_function, 'dependencies'] = str(self.dpd_list)
self.close()
class TestParamGui(QDialog):
def __init__(self, cf_dialog):
super().__init__(cf_dialog)
self.cf = cf_dialog
# Dict as Replacement for Parameters in Project for Testing
self.test_parameters = dict()
default_string = self.cf.add_pd_params.loc[self.cf.current_parameter, 'default']
gui_type = self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_type']
try:
gui_args = literal_eval(self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_args'])
except (SyntaxError, ValueError):
gui_args = {}
self.result, self.gui = self.cf.test_param_gui(default_string, gui_type, gui_args)
if not self.result:
self.init_ui()
self.open()
def init_ui(self):
layout = QVBoxLayout()
# Allow Enter-Press without closing the dialog
if self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_type'] == 'FuncGui':
void_bt = QPushButton()
void_bt.setDefault(True)
layout.addWidget(void_bt)
layout.addWidget(self.gui)
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt)
self.setLayout(layout)
class SavePkgDialog(QDialog):
def __init__(self, cf_dialog):
super().__init__(cf_dialog)
self.cf_dialog = cf_dialog
self.my_pkg_name = None
self.pkg_path = None
self.init_ui()
self.open()
def init_ui(self):
layout = QVBoxLayout()
self.func_list = SimpleList(list(self.cf_dialog.add_pd_funcs.loc[self.cf_dialog.add_pd_funcs['ready'] == 1].index))
layout.addWidget(self.func_list)
pkg_name_label = QLabel('Package-Name:')
layout.addWidget(pkg_name_label)
self.pkg_le = QLineEdit()
if self.cf_dialog.pkg_name:
self.pkg_le.setText(self.cf_dialog.pkg_name)
self.pkg_le.textEdited.connect(self.pkg_le_changed)
layout.addWidget(self.pkg_le)
save_bt = QPushButton('Save')
save_bt.clicked.connect(self.save_pkg)
layout.addWidget(save_bt)
cancel_bt = QPushButton('Cancel')
cancel_bt.clicked.connect(self.close)
layout.addWidget(cancel_bt)
self.setLayout(layout)
def pkg_le_changed(self, text):
if text != '':
self.my_pkg_name = text
def save_pkg(self):
if self.my_pkg_name or self.cf_dialog.pkg_name:
# Drop all functions with unfinished setup and add the remaining to the main_window-DataFrame
drop_funcs = self.cf_dialog.add_pd_funcs.loc[self.cf_dialog.add_pd_funcs['ready'] == 0].index
final_add_pd_funcs = self.cf_dialog.add_pd_funcs.drop(index=drop_funcs)
drop_params = list()
for param in self.cf_dialog.add_pd_params.index:
if not any([f in str(self.cf_dialog.add_pd_params.loc[param, 'functions']) for f in final_add_pd_funcs.index]):
drop_params.append(param)
final_add_pd_params = self.cf_dialog.add_pd_params.drop(index=drop_params)
# Remove no longer needed columns
del final_add_pd_funcs['ready']
del final_add_pd_params['ready']
del final_add_pd_params['functions']
# Todo: Make this more failproof (loading and saving already existing packages)
# This is only not None, when the function was imported by edit-functions
if self.cf_dialog.pkg_name:
# Update and overwrite existing settings for funcs and params
self.pkg_path = join(self.cf_dialog.ct.custom_pkg_path, self.cf_dialog.pkg_name)
pd_funcs_path = join(self.pkg_path, f'{self.cf_dialog.pkg_name}_functions.csv')
pd_params_path = join(self.pkg_path, f'{self.cf_dialog.pkg_name}_parameters.csv')
if isfile(pd_funcs_path):
read_pd_funcs = | pd.read_csv(pd_funcs_path, sep=';', index_col=0) | pandas.read_csv |
from .context import lux
import pytest
import pandas as pd
import numpy as np
from lux.utils import date_utils
from lux.executor.PandasExecutor import PandasExecutor
def test_dateformatter():
ldf = pd.read_csv("lux/data/car.csv")
ldf["Year"] = pd.to_datetime(ldf["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
timestamp = np.datetime64('2019-08-26')
ldf.maintain_metadata()
assert(date_utils.date_formatter(timestamp,ldf) == '2019')
ldf["Year"][0] = np.datetime64('1970-03-01') # make month non unique
assert (date_utils.date_formatter(timestamp, ldf) == '2019-8')
ldf["Year"][0] = np.datetime64('1970-03-03') # make day non unique
assert (date_utils.date_formatter(timestamp, ldf) == '2019-8-26')
def test_period_selection():
ldf = pd.read_csv("lux/data/car.csv")
ldf["Year"] = pd.to_datetime(ldf["Year"], format='%Y')
ldf["Year"] = pd.DatetimeIndex(ldf["Year"]).to_period(freq='A')
ldf.set_intent([lux.Clause(attribute = ["Horsepower", "Weight", "Acceleration"]), lux.Clause(attribute ="Year")])
PandasExecutor.execute(ldf.current_vis, ldf)
assert all([type(vlist.data) == lux.core.frame.LuxDataFrame for vlist in ldf.current_vis])
assert all(ldf.current_vis[2].data.columns == ["Year", 'Acceleration'])
def test_period_filter():
ldf = pd.read_csv("lux/data/car.csv")
ldf["Year"] = pd.to_datetime(ldf["Year"], format='%Y')
ldf["Year"] = | pd.DatetimeIndex(ldf["Year"]) | pandas.DatetimeIndex |
#!/usr/bin/env python3
from shapely.geometry import box
import datetime
import numpy as np
import pandas as pd
import geopandas as gpd
import argparse
from datetime import datetime
from make_boxes_from_bounds import find_hucs_of_bounding_boxes
import requests
from concurrent.futures import ThreadPoolExecutor,as_completed
import os
from tqdm import tqdm
from foss_fim.tools.inundation import read_nwm_forecast_file
def cygnss_preprocessing(bounding_boxes_file,wbd=None,projection_of_boxes='EPSG:4329',wbd_layer='WBDHU8',forecast_output_file=None,retrieve=True,workers=6,download_directory=None, daily_mean_forecast_files=None):
_, bounding_boxes = find_hucs_of_bounding_boxes(bounding_boxes_file,wbd=wbd,projection_of_boxes=projection_of_boxes,wbd_layer=wbd_layer)
# load bounding box file
bounding_boxes['event_date'] = pd.to_datetime(bounding_boxes['event_date'],utc=True)
bounding_boxes.reset_index(drop=True,inplace=True)
wbdcol_name = 'HUC'+wbd_layer[-1]
# expand dates
datetime_indices = bounding_boxes.apply(lambda df:pd.date_range(df['event_date'],periods=24,closed=None,freq='H',tz='UTC'),axis=1)
datetime_indices.name = 'date_time'
datetime_indices=pd.DataFrame(datetime_indices)
datetime_indices = datetime_indices.join(bounding_boxes[['Name',wbdcol_name]])
# append columns to expanded dates
forecast_df = | pd.DataFrame() | pandas.DataFrame |
"""
Functions to facilitate maintenance of apero sheet.
Primarly designed for automated updates with update_sheet.py, but also useful
for interactive editing.
@author: vandalt
"""
import glob
import os
import re
import numpy as np
import pandas as pd
import tqdm
from astropy.io import fits
from astropy.io.votable.tree import Table
import utils as ut
# =============================================================================
# Constants
# =============================================================================
# Pattern to search for raw files on disk
RAW_PATTERN = "/spirou/cfht_nights/common/raw/20*/*o.fits"
# Local file that stores info about previously parsed raw files
LOCAL_FILE = "object_info.csv"
# Directory with dfits outputs allowing to include objects from other databases
DFITS_DIR = "dfits_outputs"
OBJ_INFO_KEYS = [
"OBJECT",
"OBJRV",
"OBJTEMP",
"MJDEND",
"RA_DEG",
"DEC_DEG",
]
SHEET_ID = "1jwlux8AJjBMMVrbg6LszJIpFJrk6alhbT5HA7BiAHD8"
BAD_NAMES = [
"sky.*_.*",
".*_sky.*",
"sky",
"SKY",
"FakeTarget",
"Test.*",
".*Test",
"Engineering",
"Calibration",
"Nowhere"
]
# Known rejected names
REJ_NAMES = [
"Moon.*",
"Neptune.*",
"Saturn.*",
"Venus.*",
"17BE93-FT-1",
]
def get_object_info(
fpattern, local_file=None, dfits_dir=None, keys=None, verbose=False
):
"""
Fetch object information in headers of raw data from CFHT.
If a local file is found, files already read will be removed for speed.
Local txt files with dfits outputs can also be used to add objects from
external sources
Args:
fpattern (str): Glob-compatible pattern for raw files
local_file (str): Local file with observations already fetched.
(if this code has been run in the past)
dfits_dir (str): directory with dfits output as txt files.
keys (list): Header keys to extract.
Defaults to [OBJECT, OBJRV, OBJTEMP, MJDEND, RA_DEG, DEC_DEG].
verbose (bool): Additional information printed if True.
"""
if keys is None:
keys = OBJ_INFO_KEYS
if local_file is not None:
if os.path.isfile(local_file):
df_loc = pd.read_csv(local_file)
loc_files = df_loc.FILE.tolist()
else:
if verbose:
print("No local file named {}".format(local_file))
loc_files = []
else:
loc_files = []
if dfits_dir is not None:
dfits_frames = []
for dfile in glob.glob(os.path.join(dfits_dir, "*.txt")):
# dfits_frames.append(
# Table.read(dfile, format="ascii.tab").to_pandas(use_nullable_int=False)
# )
dfits_frames.append(
ut.read_dfits_tbl(dfile).to_pandas(use_nullable_int=False)
)
df_dfits = pd.concat(dfits_frames, ignore_index=True)
df_dfits.FILE = df_dfits.FILE.apply(os.path.basename)
df_dfits = df_dfits[["FILE"] + keys]
dfits_files = df_dfits.FILE.tolist()
else:
df_dfits = pd.DataFrame([])
dfits_files = []
# Find files to read
known_files = loc_files + dfits_files
full_list = glob.glob(fpattern)
all_files = [os.path.basename(f) for f in full_list]
loc_mask = np.isin(all_files, known_files)
iter_list = np.array(full_list)[~loc_mask].tolist()
# Initialize dict
outdict = dict()
for k in keys:
outdict[k] = []
# Handle files separately because not inside header
fnames = []
# Loop through files
# Slower than dfits | fitsort but avoids system calls
if verbose:
print("Fetching data from headers. This might take a few minutes.")
for filepath in tqdm.tqdm(iter_list):
hdr = fits.getheader(filepath)
for k in keys:
ut.hdr_to_dict(outdict, k, hdr)
fnames.append(os.path.basename(filepath))
# Create dataframe of new observationsfrom dict
df = pd.DataFrame([])
df["FILE"] = fnames
for k in keys:
df[k] = outdict[k]
# Append to local df if any
if len(loc_files) > 0:
df = df_loc.append(df, ignore_index=True)
if len(dfits_files) > 0:
df = df.append(df_dfits, ignore_index=True)
df = df.sort_values("FILE")
df = df.drop_duplicates("FILE") # Could have duplicates from externalf fits
return df
def bad_and_rejected(names, sh=None, bad=None, rej=None, verbose=False):
"""
Handle bad (calibration, sky, etc.) and rejected names.
Rejected names are objects that we do not wish to include in the list for
various reasons (e.g. solar system objects). They are stored in a separate
tab of the Google Sheet.
Args:
names (pd.Series): series of names to update
sh (gspread_pandas.Spread): Spreadsheet object to update with rejected
names.
Default: None, no remote update
bad (list): list of names to completely remove, by default uses a
pre defined list.
rej (list): list of rejected object names, by default uses a
pre defined list.
Returns:
good_names (pd.Series): Series of good names.
"""
# Pre-process lists into regex list
if bad is None:
bad = BAD_NAMES
if rej is None:
rej = REJ_NAMES
bad = "|".join(bad) # pipe symbol as OR in regex
rej = "|".join(rej) # pipe symbol as OR in regex
# Remove bad
good_names = names[~names.str.fullmatch(bad)]
# Remove rejected
rej_names = good_names[good_names.str.fullmatch(rej)]
# Update rejected names in google sheet
if sh is not None:
rej_names = | pd.DataFrame(rej_names) | pandas.DataFrame |
'''
Data pre process
@author:
<NAME> (<EMAIL>)
@ created:
25/8/2017
@references:
'''
import os
import json
import pandas as pd
# import pickle
import numpy as np
import dill as pickle
dataset_name = "movies"
TPS_DIR = '../data2014/%s' % dataset_name
# TP_file = os.path.join(TPS_DIR, 'Musical_Instruments_5.json')
# TP_file = os.path.join(TPS_DIR, 'Digital_Music_5.json')
# TP_file = os.path.join(TPS_DIR, 'Industrial_and_Scientific_5.json')
# TP_file = os.path.join(TPS_DIR, 'Toys_and_Games_5.json')
# TP_file = os.path.join(TPS_DIR, 'CDs_and_Vinyl_5.json')
# TP_file = os.path.join(TPS_DIR, 'Kindle_Store_5.json')
TP_file = os.path.join(TPS_DIR, 'Movies_and_TV_5.json')
f = open(TP_file)
users_id = []
items_id = []
ratings = []
reviews = []
np.random.seed(2017)
null = 0
# 读取评分
print("loading ratings...")
for line in f:
js = json.loads(line)
if str(js['reviewerID']) == 'unknown':
print("reviewerID unknown")
continue
if str(js['asin']) == 'unknown':
print("asin unknown")
continue
try:
reviews.append(str(js['reviewText']))
users_id.append(str(js['reviewerID']))
items_id.append(str(js['asin']))
ratings.append(str(js['overall']))
except KeyError:
null += 1
print("num of reviews:", len(reviews))
print("%s null reviews jumped. " % null)
data = pd.DataFrame({'user_id': pd.Series(users_id),
'item_id': | pd.Series(items_id) | pandas.Series |
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import sys
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage
import bs4 as bs
import urllib.request
import time
import random
import urllib3
import os
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
#path_of_url_file = 'C:\\Users\\<NAME>\\Desktop\\'
path_of_reference_files = 'C:\\LavaWebScraper\\GSMARENA\\GSMarena_complete\\reference_files\\'
path_of_gsmarena_complete_csv = 'C:\\LavaWebScraper\\GSMARENA\\GSMarena_complete\\'
user_agent = {'User-agent':'Mozilla/5.0'}
base_url = 'https://www.gsmarena.com/makers.php3'
ur='https://www.gsmarena.com/'
##country = 'TAIWAN'
##company = 'hTC'
model_list = []
usp = []
display_list = []
memory_list = []
processor_list = []
camera_list = []
battery_list = []
thickness_list = []
extras_links = []
records = []
href = []
st_list_heads=[]
st_list_dets=[]
hr=[]
spec_url=[]
HREF=[]
BRANDS=[]
device_num=[]
price_list=[]
launch_date_list=[]
company_name_list=[]
devnum=[]
######################################################## PARAMETERS READ FROM THE FILES ##############################################################################
href_file = []
usp_file = []
company_name_list_file = []
model_list_file = []
with open(os.path.join(path_of_reference_files, 'gsmarena_href.txt'), 'r') as f:
href_file = f.readlines()
with open(os.path.join(path_of_reference_files, 'gsmarena_usp.txt'), 'r') as f:
usp_file = f.readlines()
with open(os.path.join(path_of_reference_files, 'gsmarena_modelnames.txt'), 'r') as f:
model_list_file = f.readlines()
with open(os.path.join(path_of_reference_files, 'gsmarena_company_name_list.txt'), 'r') as f:
company_name_list_file = f.readlines()
for i in range(len(href_file)):
href_file[i] = href_file[i].replace('\n','')
model_list_file[i] = model_list_file[i].replace('\n','')
usp_file[i] = usp_file[i].replace('\n','')
company_name_list_file[i] = company_name_list_file[i].replace('\n','')
######################################################################################################################################################################
#PAGES=[]
#r=requests.get(base_url, headers=user_agent)
http = urllib3.PoolManager()
#####################################
response = http.request('GET', base_url)
soup = BeautifulSoup(response.data, 'html.parser')
#####################################
results=soup.find_all('div',attrs={'class':'st-text'})
#print(len(results))
for a in range(len(results)):
sa=results[a].find_all('table')
#print(len(sa))
for b in range(len(sa)):
sb=sa[b].find_all('tr')
#print(len(sb))
for c in range(len(sb)):
sc=sb[c].find_all('td')
for d in range(len(sc)):
HREF.append(sc[d].find('a')['href'])
sd=sc[d].find('a')
BRANDS.append(sd.contents[0])
device_num.append(sd.contents[2].text)
#print(len(HREF))
for i in range(len(HREF)):
HREF[i]=ur+HREF[i]
print('------------------------------------------------------------------------------------------------------------------------------------------------------')
for i in range(len(BRANDS)):
print(BRANDS[i])
print('------------------------------------------------------------------------------------------------------------------------------------------------------')
for i in device_num:
print(i)
#print(HREF[i])
print(len(BRANDS))
print(len(device_num))
for a in range(len(HREF)):
PAGES=[]
print('9')
#r=requests.get(HREF[a])
r = http.request('GET', HREF[a])
soup = BeautifulSoup(r.data, 'html.parser')
results=soup.find_all('div',attrs={'class':'nav-pages'})
print(BRANDS[a])
print(device_num[a])
print(len(results))
PAGES.append(HREF[a])
if len(results)!=0:
for b in range(len(results)):
sa=results[b].find_all('a')
for c in range(len(sa)):
t=ur +sa[c]['href']
PAGES.append(t)
print('No.of pages are:- ',end='')
print(len(PAGES))
for i in PAGES:
print(i)
print('--------------------------------------------------------------------------------------------------------------------------------------------')
for i in range(len(PAGES)):
#r1=requests.get(PAGES[i])
r1 = http.request('GET', PAGES[i])
soup = BeautifulSoup(r1.data, 'html.parser')
results1=soup.find_all('div',attrs={'class':'makers'})
for d in range(len(results1)):
sb=results1[d].find_all('ul')
for e in range(len(sb)):
sc=sb[e].find_all('li')
for f in range(len(sc)):
ts = ur + sc[f].find('a')['href']
if ts not in href_file:
href.append(sc[f].find('a')['href'])
if sc[f].find('img')['title'].replace('″','"') not in usp_file:
usp.append(sc[f].find('img')['title'].replace('″','"'))
if sc[f].find('strong').text.strip() not in model_list_file:
model_list.append(sc[f].find('strong').text.strip())
if BRANDS[a] not in company_name_list_file:
company_name_list.append(BRANDS[a])
print('length of href is:- ',end='')
print(len(href))
for i in range(len(href)):
href[i]=ur+href[i]
## print(href[i])
print('--------------------------------------------------------------------------------------------------------------------------------------------')
print('length of usp is:- ',end='')
print(len(usp))
for i in usp:
print(i)
print('--------------------------------------------------------------------------------------------------------------------------------------------')
print('length of model_list is:- ',end='')
print(len(model_list))
print(len(company_name_list))
##for i in model_list:
## print(i)
print('--------------------------------------------------------------------------------------------------------------------------------------------')
var=1
##
##with open(os.path.join(path_of_url_file, 'gsmarena_href.txt'), 'w') as f:
## for k in range(len(href)):
## f.write(href[k] + '\n')
##
##with open(os.path.join(path_of_url_file, 'gsmarena_modelnames.txt'), 'w') as f:
## for k in range(len(model_list)):
## f.write(model_list[k] + '\n')
##
##with open(os.path.join(path_of_url_file, 'gsmarena_usp.txt'), 'w') as f:
## for k in range(len(usp)):
## try:
## f.write(usp[k] + '\n')
## except:
## f.write('Not Available' + '\n')
##
##with open(os.path.join(path_of_url_file, 'gsmarena_company_name_list.txt'), 'w') as f:
## for k in range(len(company_name_list)):
## try:
## f.write(company_name_list[k] + '\n')
## except:
## f.write('Not Available' + '\n')
for i in range(len(href)):
## r = random.randint(5,10)
## print('SLEEPING FOR %d SECONDS NOW.' %r)
## time.sleep(r)
#var=var+1
c1=''
d1=''
print('MODEL NO.%d' %i)
r=requests.get(href[i])
#r = http.request('GET', href[i])
soup = BeautifulSoup(r.text,'html5lib')
results=soup.find_all('div',attrs={'id':'specs-list'})
for a in range(len(results)):
sa=results[a].find_all('table',attrs={'cellspacing':'0'})
for b in range(len(sa)):
sb=sa[b].find_all('tbody')
for c in range(len(sb)):
sc=sb[c].find('th').text
if 'body' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'dimension' in se[e].text.lower():
thickness_list.append(sf[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"'))
if 'platform' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'cpu' in se[e].text.lower():
processor_list.append(sf[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"'))
if 'memory' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'internal' in se[e].text.lower():
memory_list.append(sf[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"'))
if 'camera' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'primary' in se[e].text.lower() or 'secondary' in se[e].text.lower():
c1=c1+se[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"')+':- '+sf[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"')+' || '
if 'display' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'type' in se[e].text.lower() or 'size' in se[e].text.lower():
d1=d1+se[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"')+':- '+sf[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"')+' || '
if 'battery' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'mah' in sf[e].text.lower():
battery_list.append(sf[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"'))
if 'launch' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'announce' in se[e].text.lower():
launch_date_list.append(sf[e].text.replace('μ','mu').replace('α','alpha').replace('”','"').replace(u'\x94', '"').strip())
if 'misc' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'price' in se[e].text.lower():
ts = sf[e].text.replace('α','alpha').replace('μ','mu').replace('”','"').replace(u'\x94', '"').strip()
#ts = ts.decode('utf-8').replace(u'\x94', '"')
price_list.append(ts)
if d1!='':
display_list.append(d1)
if c1!='':
camera_list.append(c1)
if len(battery_list)==i:
battery_list.append('Not Available')
if len(memory_list)==i:
memory_list.append('Not Available')
if len(processor_list)==i:
processor_list.append('Not Available')
if len(display_list)==i:
display_list.append('Not Available')
if len(thickness_list)==i:
thickness_list.append('Not Available')
if len(camera_list)==i:
camera_list.append('Not Available')
if len(price_list)==i:
price_list.append('Not Available')
if len(usp)==i:
usp.append('Not Available')
if len(launch_date_list)==i:
launch_date_list.append('Not Available')
## if var==500:
## break
print('DISPLAY LIST:- ')
print(len(display_list))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('PROCESSOR LIST:- ')
print(len(processor_list))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('MEMORY LIST:- ')
print(len(memory_list))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('CAMERA LIST:- ')
print(len(camera_list))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('BATTERY LIST:- ')
print(len(battery_list))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('THICKNESS LIST:-')
print(len(thickness_list))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('PRICE LIST:_')
print(len(price_list))
print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('LAUNCH DATE:-')
print(len(launch_date_list))
extras_links = href
for i in range(len(model_list)):
records.append((company_name_list[i],model_list[i],price_list[i],launch_date_list[i], usp[i], display_list[i], camera_list[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))
df = | pd.DataFrame(records, columns = ['COMPANY', 'MODEL', 'PRICE','LAUNCH DATE', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS']) | pandas.DataFrame |
# This is a early version of Enumerat.py
import sys
import os
import copy
import time
import json
import pandas as pd
sys.path.append(os.path.abspath('..\\game'))
class Tree(object):
def __init__(self):
self.up = None # tree structure
self.down = None # tree structure
self.layer = None # current layer number
self.state = 'Unsettled'
self.nt = [1, 2, 3, 4, 5, 6, 7, 8, 9] # next possible move
self.board = [1, 2, 3, 4, 5, 6, 7, 8, 9] # current board
self.score_1 = 0.0
self.score_2 = 0.0
def final_chk(board):
print(len(board))
a = 0
for i in range(len(board)):
if board[i].layer == 9:
if board[i].state == 'Unsettled':
a += 1
print('error')
print(a)
def find_all():
def init_obj():
a = Tree()
a.up = []
a.down = []
a.layer = 0
return a
def chk_pos(board, num):
temp = num in range(1, 10)
temp_2 = board[num - 1] not in {'O', 'X'}
return temp and temp_2
def find_num(board):
A = []
for i in range(9):
if chk_pos(board, i + 1):
A.append(i + 1)
return A
def check_board(board):
win_combination = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
(0, 3, 6), (1, 4, 7), (2, 5, 8), (0, 4, 8), (2, 4, 6))
count = 0
for a in win_combination:
if board[a[0]] == board[a[1]] == board[a[2]] == "X":
return 'X'
if board[a[0]] == board[a[1]] == board[a[2]] == "O":
return 'O'
for a in range(9):
if board[a] == "X" or board[a] == "O":
count += 1
if count == 9:
return 'Tie'
return 'Unsettled'
A = []
# 0th layer
A.append(init_obj())
A[0].nt = find_num(A[0].board)
A[0].State = check_board(A[0].board)
# 1st layer
for i in A[0].nt:
temp = init_obj()
temp.layer = 1
temp.up = 0
temp.board[i - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
A.append(temp)
for i in range(len(A)):
if i == 0:
continue
else:
if A[i].up == 0:
A[0].down.append(i)
# 2nd layer
for i in A[0].down:
for j in A[i].nt:
temp = init_obj()
temp.layer = 2
temp.up = i
temp.board = copy.deepcopy(A[i].board)
temp.board[j - 1] = 'O'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
A.append(temp)
for i in range(len(A)):
if A[i].layer == 0 or A[i].layer == 1:
continue
else:
if A[i].layer == 2:
A[A[i].up].down.append(i)
# 3rd layer
for i in range(len(A)):
if A[i].layer == 1:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 3
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
A.append(temp)
for i in range(len(A)):
if A[i].layer == 3:
A[A[i].up].down.append(i)
# 4th layer
for i in range(len(A)):
if A[i].layer == 2:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 4
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'O'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
A.append(temp)
for i in range(len(A)):
if A[i].layer == 4:
A[A[i].up].down.append(i)
# 5th layer
for i in range(len(A)):
if A[i].layer == 3:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 5
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 5:
A[A[i].up].down.append(i)
# 6th layer
for i in range(len(A)):
if A[i].layer == 4:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 6
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'O'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 6:
A[A[i].up].down.append(i)
# 7th layer
for i in range(len(A)):
if A[i].layer == 5:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 7
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 7:
A[A[i].up].down.append(i)
# 8th layer
for i in range(len(A)):
if A[i].layer == 6:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 8
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'O'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 8:
A[A[i].up].down.append(i)
# 9th layer
for i in range(len(A)):
if A[i].layer == 7:
for j in A[i].down:
for k in A[j].nt:
temp = init_obj()
temp.layer = 9
temp.up = j
temp.board = copy.deepcopy(A[j].board)
temp.board[k - 1] = 'X'
temp.nt = find_num(temp.board)
temp.state = check_board(temp.board)
if temp.state != 'Unsettled':
temp.nt = []
A.append(temp)
for i in range(len(A)):
if A[i].layer == 9:
A[A[i].up].down.append(i)
return A
def score(A):
for i in range(len(A)):
if A[i].layer == 9:
if A[i].state == 'X':
A[i].score_1 = 1
elif A[i].state == 'O':
A[i].score_2 = 1
else:
pass
for k in [8, 7, 6, 5, 4, 3, 2, 1, 0]:
for i in range(len(A)):
if A[i].layer == k:
if A[i].state == 'X':
A[i].score_1 = 1
elif A[i].state == 'O':
A[i].score_2 = 1
else:
temp_1 = 0
temp_2 = 0
for j in A[i].down:
temp_1 = temp_1 + A[j].score_1
temp_2 = temp_2 + A[j].score_2
A[i].score_1 = temp_1 / len(A[i].down)
A[i].score_2 = temp_2 / len(A[i].down)
return A
def to_json(A):
O = {}
for i in range(len(A)):
O[i] = {
'up': A[i].up,
'down': A[i].down,
'layer': A[i].layer,
'state': A[i].state,
'nt': A[i].nt,
'board': A[i].board,
'score_1': A[i].score_1,
'score_2': A[i].score_2}
with open('assets//data.json', 'w') as outfile:
json.dump(O, outfile)
def to_df(A):
O = {
'up': [
0 for i in range(
len(A))], 'down': [
0 for i in range(
len(A))], 'layer': [
0 for i in range(
len(A))], 'state': [
0 for i in range(
len(A))], 'nt': [
0 for i in range(
len(A))], 'board': [
0 for i in range(
len(A))], 'score_1': [
0 for i in range(
len(A))], 'score_2': [
0 for i in range(
len(A))]}
for i in range(len(A)):
O['up'][i] = A[i].up
O['down'][i] = A[i].down
O['layer'][i] = A[i].layer
O['state'][i] = A[i].state
O['nt'][i] = A[i].nt
O['board'][i] = A[i].board
O['score_1'][i] = A[i].score_1
O['score_2'][i] = A[i].score_2
P = pd.DataFrame(data=O)
| pd.DataFrame.to_csv(P, 'assets//data.csv', encoding='utf-8') | pandas.DataFrame.to_csv |
'''
@Description: code
@Author: MiCi
@Date: 2020-03-13 17:17:47
@LastEditTime: 2020-03-14 08:47:08
@LastEditors: MiCi
'''
import pandas as pd
# import numpy as np
class Basic4(object):
def __init__(self):
return
def basic_use(self):
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
# append末尾追加数据
print(df1.append(df2))
# concat列后添加
print( | pd.concat([df1, df2], axis=1) | pandas.concat |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = | pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns) | pandas.DataFrame |
import pandas as pd
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import numpy as np
import seaborn as sns; sns.set()
import csv
from scipy.stats import ranksums
"""
Load data song data
"""
# load in song data
data_path = "C:/Users/abiga/Box " \
"Sync/Abigail_Nicole/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed_forTOD.csv"
log_song_data = pd.DataFrame.from_csv(data_path, header=0, index_col=None)
col_to_skip = ['Latitude', 'Longitude', 'RecordingDay',
'RecordingMonth', 'RecordingYear', 'RecordingTime',
'RecordingTimeSeconds']
data_subset = log_song_data.drop(col_to_skip, axis=1)
# load in time data --> before or after sunrise, twilights, and noon (only going to use sunrise and noon)
data_path = "C:/Users/abiga/Box " \
"Sync/Abigail_Nicole/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed" \
"_forTOD_SunriseTwilightNoon.csv"
time_data = pd.DataFrame.from_csv(data_path, header=0, index_col=None)
# must remove duplicates -- have more than one bird from same recording -- duplicate catalog number and time data
time_data = time_data.drop_duplicates()
# combine tables using catalog no
combined_df = | pd.merge(data_subset, time_data, on='CatalogNo') | pandas.merge |
import copy
import itertools
import multiprocessing
import string
import traceback
import warnings
from multiprocessing import Pool
from operator import itemgetter
import jellyfish as jf
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy.stats import wasserstein_distance
from simod.configuration import Configuration, Metric
from . import alpha_oracle as ao
from .alpha_oracle import Rel
from ..support_utils import progress_bar_async
class SimilarityEvaluator:
"""Evaluates the similarity of two event-logs."""
def __init__(self, log_data: pd.DataFrame, simulation_data: pd.DataFrame, settings: Configuration, max_cases=500,
dtype='log'):
self.dtype = dtype
self.log_data = copy.deepcopy(log_data)
self.simulation_data = copy.deepcopy(simulation_data)
self.max_cases = max_cases
self.one_timestamp = settings.read_options.one_timestamp
self._preprocess_data(dtype)
def _preprocess_data(self, dtype):
preprocessor = self._get_preprocessor(dtype)
return preprocessor()
def _get_preprocessor(self, dtype):
if dtype == 'log':
return self._preprocess_log
elif dtype == 'serie':
return self._preprocess_serie
else:
raise ValueError(dtype)
def _preprocess_log(self):
self.ramp_io_perc = 0.2
self.log_data['source'] = 'log'
self.simulation_data['source'] = 'simulation'
data = pd.concat([self.log_data, self.simulation_data], axis=0, ignore_index=True)
if (('processing_time' not in data.columns) or ('waiting_time' not in data.columns)):
data = self.calculate_times(data)
data = self.scaling_data(data)
# save data
self.log_data = data[data.source == 'log']
self.simulation_data = data[data.source == 'simulation']
self.alias = self.create_task_alias(data, 'task')
self.alpha_concurrency = ao.AlphaOracle(self.log_data, self.alias, self.one_timestamp, True)
# reformat and sampling data
self.log_data = self.reformat_events(self.log_data.to_dict('records'), 'task')
self.simulation_data = self.reformat_events(self.simulation_data.to_dict('records'), 'task')
num_traces = int(len(self.simulation_data) * self.ramp_io_perc)
self.simulation_data = self.simulation_data[num_traces:-num_traces]
self.log_data = list(map(lambda i: self.log_data[i],
np.random.randint(0, len(self.log_data), len(self.simulation_data))))
def _preprocess_serie(self):
# load data
self.log_data['source'] = 'log'
self.simulation_data['source'] = 'simulation'
def measure_distance(self, metric: Metric, verbose=False):
"""
Measures the distance of two event-logs
with with tsd or dl and mae distance
Returns
-------
distance : float
"""
self.verbose = verbose
# similarity measurement and matching
evaluator = self._get_evaluator(metric)
if metric in [Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
distance = evaluator(self.log_data, self.simulation_data, criteria=metric)
else:
distance = evaluator(self.log_data, self.simulation_data, metric)
self.similarity = {'metric': metric, 'sim_val': np.mean([x['sim_score'] for x in distance])}
def _get_evaluator(self, metric: Metric):
if self.dtype == 'log':
if metric in [Metric.TSD, Metric.DL, Metric.MAE, Metric.DL_MAE]:
return self._evaluate_seq_distance
elif metric is Metric.LOG_MAE:
return self.log_mae_metric
elif metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
return self.log_emd_metric
else:
raise ValueError(metric)
elif self.dtype == 'serie':
if metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
return self.serie_emd_metric
else:
raise ValueError(metric)
else:
raise ValueError(self.dtype)
# =============================================================================
# Timed string distance
# =============================================================================
def _evaluate_seq_distance(self, log_data, simulation_data, metric: Metric):
"""
Timed string distance calculation
Parameters
----------
log_data : Ground truth list
simulation_data : List
Returns
-------
similarity : tsd similarity
"""
similarity = list()
# define the type of processing sequencial or parallel
cases = len(set([x['caseid'] for x in log_data]))
if cases <= self.max_cases:
args = (metric, simulation_data, log_data,
self.alpha_concurrency.oracle,
({'min': 0, 'max': len(simulation_data)},
{'min': 0, 'max': len(log_data)}))
df_matrix = self._compare_traces(args)
else:
cpu_count = multiprocessing.cpu_count()
mx_len = len(log_data)
ranges = self.define_ranges(mx_len, int(np.ceil(cpu_count / 2)))
ranges = list(itertools.product(*[ranges, ranges]))
reps = len(ranges)
pool = Pool(processes=cpu_count)
# Generate
args = [(metric, simulation_data[r[0]['min']:r[0]['max']],
log_data[r[1]['min']:r[1]['max']],
self.alpha_concurrency.oracle,
r) for r in ranges]
p = pool.map_async(self._compare_traces, args)
if self.verbose:
progress_bar_async(p, f'evaluating {metric}:', reps)
pool.close()
# Save results
df_matrix = pd.concat(list(p.get()), axis=0, ignore_index=True)
df_matrix.sort_values(by=['i', 'j'], inplace=True)
df_matrix = df_matrix.reset_index().set_index(['i', 'j'])
if metric == Metric.DL_MAE:
dl_matrix = df_matrix[['dl_distance']].unstack().to_numpy()
mae_matrix = df_matrix[['mae_distance']].unstack().to_numpy()
# MAE normalized
max_mae = mae_matrix.max()
mae_matrix = np.divide(mae_matrix, max_mae)
# multiple both matrixes by Beta equal to 0.5
dl_matrix = np.multiply(dl_matrix, 0.5)
mae_matrix = np.multiply(mae_matrix, 0.5)
# add each point in between
cost_matrix = np.add(dl_matrix, mae_matrix)
else:
cost_matrix = df_matrix[['distance']].unstack().to_numpy()
row_ind, col_ind = linear_sum_assignment(np.array(cost_matrix))
# Create response
for idx, idy in zip(row_ind, col_ind):
similarity.append(dict(caseid=simulation_data[idx]['caseid'],
sim_order=simulation_data[idx]['profile'],
log_order=log_data[idy]['profile'],
sim_score=(cost_matrix[idx][idy]
if metric == Metric.MAE else
(1 - (cost_matrix[idx][idy])))
)
)
return similarity
@staticmethod
def _compare_traces(args):
def ae_distance(et_1, et_2, st_1, st_2):
cicle_time_s1 = (et_1 - st_1).total_seconds()
cicle_time_s2 = (et_2 - st_2).total_seconds()
ae = np.abs(cicle_time_s1 - cicle_time_s2)
return ae
def tsd_alpha(s_1, s_2, p_1, p_2, w_1, w_2, alpha_concurrency):
"""
Compute the Damerau-Levenshtein distance between two given
strings (s_1 and s_2)
Parameters
----------
comp_sec : dict
alpha_concurrency : dict
Returns
-------
Float
"""
def calculate_cost(s1_idx, s2_idx):
t_1 = p_1[s1_idx] + w_1[s1_idx]
if t_1 > 0:
b_1 = (p_1[s1_idx] / t_1)
cost = ((b_1 * np.abs(p_2[s2_idx] - p_1[s1_idx])) +
((1 - b_1) * np.abs(w_2[s2_idx] - w_1[s1_idx])))
else:
cost = 0
return cost
dist = {}
lenstr1 = len(s_1)
lenstr2 = len(s_2)
for i in range(-1, lenstr1 + 1):
dist[(i, -1)] = i + 1
for j in range(-1, lenstr2 + 1):
dist[(-1, j)] = j + 1
for i in range(0, lenstr1):
for j in range(0, lenstr2):
if s_1[i] == s_2[j]:
cost = calculate_cost(i, j)
else:
cost = 1
dist[(i, j)] = min(
dist[(i - 1, j)] + 1, # deletion
dist[(i, j - 1)] + 1, # insertion
dist[(i - 1, j - 1)] + cost # substitution
)
if i and j and s_1[i] == s_2[j - 1] and s_1[i - 1] == s_2[j]:
if alpha_concurrency[(s_1[i], s_2[j])] == Rel.PARALLEL:
cost = calculate_cost(i, j - 1)
dist[(i, j)] = min(dist[(i, j)], dist[i - 2, j - 2] + cost) # transposition
return dist[lenstr1 - 1, lenstr2 - 1]
def gen(metric: Metric, serie1, serie2, oracle, r):
"""Reads the simulation results stats"""
try:
df_matrix = list()
for i, s1_ele in enumerate(serie1):
for j, s2_ele in enumerate(serie2):
element = {'i': r[0]['min'] + i, 'j': r[1]['min'] + j}
if metric in [Metric.TSD, Metric.DL, Metric.DL_MAE]:
element['s_1'] = s1_ele['profile']
element['s_2'] = s2_ele['profile']
element['length'] = max(len(s1_ele['profile']), len(s2_ele['profile']))
if metric is Metric.TSD:
element['p_1'] = s1_ele['proc_act_norm']
element['p_2'] = s2_ele['proc_act_norm']
element['w_1'] = s1_ele['wait_act_norm']
element['w_2'] = s2_ele['wait_act_norm']
if metric in [Metric.MAE, Metric.DL_MAE]:
element['et_1'] = s1_ele['end_time']
element['et_2'] = s2_ele['end_time']
element['st_1'] = s1_ele['start_time']
element['st_2'] = s2_ele['start_time']
df_matrix.append(element)
df_matrix = pd.DataFrame(df_matrix)
if metric is Metric.TSD:
df_matrix['distance'] = df_matrix.apply(
lambda x: tsd_alpha(x.s_1, x.s_2, x.p_1, x.p_2, x.w_1, x.w_2, oracle) / x.length, axis=1)
elif metric is Metric.DL:
df_matrix['distance'] = df_matrix.apply(
lambda x: jf.damerau_levenshtein_distance(''.join(x.s_1), ''.join(x.s_2)) / x.length, axis=1)
elif metric is Metric.MAE:
df_matrix['distance'] = df_matrix.apply(
lambda x: ae_distance(x.et_1, x.et_2, x.st_1, x.st_2), axis=1)
elif metric is Metric.DL_MAE:
df_matrix['dl_distance'] = df_matrix.apply(
lambda x: jf.damerau_levenshtein_distance(''.join(x.s_1), ''.join(x.s_2)) / x.length, axis=1)
df_matrix['mae_distance'] = df_matrix.apply(
lambda x: ae_distance(x.et_1, x.et_2, x.st_1, x.st_2), axis=1)
else:
raise ValueError(metric)
return df_matrix
except Exception:
traceback.print_exc()
return gen(*args)
# =============================================================================
# whole log MAE
# =============================================================================
def log_mae_metric(self, log_data: list, simulation_data: list, metric: Metric) -> list:
"""
Measures the MAE distance between two whole logs
Parameters
----------
log_data : list
simulation_data : list
Returns
-------
list
"""
similarity = list()
log_data = pd.DataFrame(log_data)
simulation_data = pd.DataFrame(simulation_data)
log_timelapse = (log_data.end_time.max() - log_data.start_time.min()).total_seconds()
sim_timelapse = (simulation_data.end_time.max() - simulation_data.start_time.min()).total_seconds()
similarity.append({'sim_score': np.abs(sim_timelapse - log_timelapse)})
return similarity
# =============================================================================
# Log emd distance
# =============================================================================
def log_emd_metric(self, log_data: list, simulation_data: list, criteria: Metric = Metric.HOUR_EMD) -> list:
"""
Measures the EMD distance between two logs on different aggregation
levels specified by user by defaul per hour
Parameters
----------
log_data : list
simulation_data : list
criteria : TYPE, optional
DESCRIPTION. The default is 'hour'.
Returns
-------
list
"""
similarity = list()
window = 1
# hist_range = [0, int((window * 3600))]
log_data = pd.DataFrame(log_data)
simulation_data = pd.DataFrame(simulation_data)
def split_date_time(dataframe, feature, source):
day_hour = lambda x: x[feature].hour
dataframe['hour'] = dataframe.apply(day_hour, axis=1)
date = lambda x: x[feature].date()
dataframe['date'] = dataframe.apply(date, axis=1)
# create time windows
i = 0
daily_windows = dict()
for hour in range(24):
if hour % window == 0:
i += 1
daily_windows[hour] = i
dataframe = dataframe.merge(
| pd.DataFrame.from_dict(daily_windows, orient='index') | pandas.DataFrame.from_dict |
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import argparse, sys, glob, os
import pandas as pd
import numpy as np
from PIL import Image
from tensorflow.python.ops import data_flow_ops
import validate_on_lfw
import lfw
import facenet
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--schema_dir', type=str,
help='Directory with schema.',
default='../datasets/main_dataset/Style Sku Family.csv')
parser.add_argument('--model', type=str,
help='Directory with preprocessed data.',
default='../models/20210407-203650')
parser.add_argument('--log', type=str,
help='Directory with preprocessed data.',
default='../logs/20210407-203650')
parser.add_argument('--data_dir', type=str,
help='Directory with data.',
default='../datasets/raw_pangrams/')
parser.add_argument('--write_dir', type=str,
help='Directory with data.',
default='../datasets/main_dataset/')
parser.add_argument('--train_dir', type=str,
help='Directory with data.',
default='../datasets/crop7_train')
parser.add_argument('--test_dir', type=str,
help='Directory with data.',
default='../datasets/crop7_test')
return parser.parse_args(argv)
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, labels, image_paths, batch_size):
# Run forward pass to calculate embeddings
print('Runnning forward pass on LFW images')
# Enqueue one epoch of image paths and labels
nrof_images = len(image_paths)
labels_array = np.expand_dims(np.arange(0,nrof_images),1)
image_paths_array = np.expand_dims(np.repeat(np.array(image_paths),1),1)
control_array = np.zeros_like(labels_array, np.int32)
control_array += np.ones_like(labels_array)*facenet.FIXED_STANDARDIZATION
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
embedding_size = int(embeddings.get_shape()[1])
assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
nrof_batches = nrof_images // batch_size
emb_array = np.zeros((nrof_images, embedding_size))
lab_array = np.zeros((nrof_images,))
for i in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size}
emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
lab_array[lab] = lab
emb_array[lab, :] = emb
if i % 10 == 9:
#print('.', end='')
sys.stdout.flush()
print('')
embeddings = np.zeros((nrof_images, embedding_size))
embeddings = emb_array
return embeddings
def get_embeddings(image_paths, model, batch_size):
embeddings = None
with tf.Graph().as_default():
with tf.Session() as sess:
paths = image_paths
actual_issame = None
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
nrof_preprocess_threads = 4
image_size = (100, 100)
eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
dtypes=[tf.string, tf.int32, tf.int32],
shapes=[(1,), (1,), (1,)],
shared_name=None, name=None)
eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
# Load the model
input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
facenet.load_model(model, input_map=input_map)
# Get output tensor
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
#for the first round save the input map, so that we can visualize images of positives
embeddings = evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, label_batch, paths, batch_size)
return embeddings
def find_images(args):
schema = | pd.read_csv(args.schema_dir) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 24 18:15:35 2022
Used for plottinf future H2 scenarios for Section 3.4
@author: <NAME>
"""
# Standard Library imports
import argparse
import gzip
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import netCDF4
import numpy as np
import os
import pandas as pd
import sys
import xarray as xr
import csv
import random
import matplotlib.cm as cm
import scipy.linalg
import scipy.stats
from scipy.stats import pearsonr
from numpy import genfromtxt
# Third party imports
from collections import OrderedDict
from datetime import datetime
# Semi-local imports
import name_qch4_couple.io
import name_qch4_couple.name
import name_qch4_couple.plot_h2
# Local imports
import chem_co
# Plots
date = '2018-04'
# Dates
dates_tHour = pd.date_range(
pd.to_datetime(date),
pd.to_datetime(date) + pd.DateOffset(months=12),
closed='left',
freq='1H'
)
# import scenarios
mod_0, sigma_obs_H2 = chem_co.read_obs(dates_tHour, 'mod', 0)
mod_4, sigma_obs_H2 = chem_co.read_obs(dates_tHour, 'mod', 0.035)
mod_8, sigma_obs_H2 = chem_co.read_obs(dates_tHour, 'mod', 0.165)
mod_12, sigma_obs_H2 = chem_co.read_obs(dates_tHour, 'mod', 0.241)
# import modelled 'baselines'
bas_mhd, sigma_obs_H2 = chem_co.read_obs(dates_tHour, 'bas_mhd', 0)
bas_wao, sigma_obs_H2 = chem_co.read_obs(dates_tHour, 'bas_wao', 0)
# plot H2 concentraion scenarios
fig = {}
ax = {}
fig_param = {
'w': 10, 'h': 3,
'px0': 0.80, 'py0': 0.50,
'pw': 9.15, 'ph': 2.45,
'ylblx': 0.05, 'ylbly': 1.5, # left, centre aligned
'fontsize': 6,
}
plt.close('all')
# Concentration
fig['main'] = plt.figure(figsize=(fig_param['w'], fig_param['h']), dpi=300)
for i in ['H2']:
fig['main'].clf()
ax['main'] = name_qch4_couple.plot_h2.generic(
fig=fig['main'],
idata={
'bas_mhd': [
'fill',
[dates_tHour, np.array(bas_mhd), np.array(bas_wao)],
{ 'facecolor': '#9D9D9D', 'lw': 0.5, 'label': '', 'ls':'-'}
],
'mod13': [
'line',
[dates_tHour, np.array(mod_12), '-'],
{'c': '#d73027', 'lw': 0.5, 'label': ''}
],
'mod9': [
'line',
[dates_tHour, np.array(mod_8), '-'],
{'c': '#fc8d59', 'lw': 0.5, 'label': ''}
],
'mod5': [
'line',
[dates_tHour, np.array(mod_4), '-'],
{'c': '#91bfdb', 'lw': 0.5, 'label': ''}
],
# 'mod1': [
# 'line',
# [dates_tHour, np.array(mod_0), '--'],
# {'c': '#fee090', 'lw': 0.5, 'label': ''}
# ],
},
texts=[
{
'x': fig_param['ylblx'] / fig_param['w'],
'y': fig_param['ylbly'] / fig_param['h'],
's': (
u'$\chi$ H$_{2}$ (nmol mol$^{-1}$)'
),
'ha': 'left', 'va': 'center',
'size': fig_param['fontsize'], 'rotation': 90
}
],
xlim=[
pd.to_datetime(date),
| pd.to_datetime(date) | pandas.to_datetime |
#! /usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import argparse
def generate_csv(num_rows, num_cols, num_distinct_vals, fname):
cols = [str('A' + str(i)) for i in range(num_cols)]
data = []
if type(num_distinct_vals) is list or type(num_distinct_vals) is tuple:
for i in range(num_rows):
vals = [i] # first column is the primary key
vals += list(
np.random.choice(num_distinct_vals[j])
for j in range(0, num_cols - 1))
data.append(vals)
else:
for i in range(num_rows):
vals = (np.random.choice(num_distinct_vals)
for j in range(num_cols))
data.append(vals)
df = | pd.DataFrame(data=data, columns=cols) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
NHK COVID-19 Dataset
Data link: https://www3.nhk.or.jp/n-data/opendata/coronavirus/nhk_news_covid19_prefectures_daily_data.csv
Q: How it works?
A: It gets NHK COVID-19 dataset automatically and saves as working csv, then plots them.
Q: How to use this file?
A: Input to choose which column of data.
And input to choose the prefecture(s) to plot.
"""
#import numpy as np
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
import io
import requests
# NHK COVID-19 Dataset.
url="https://www3.nhk.or.jp/n-data/opendata/coronavirus/nhk_news_covid19_prefectures_daily_data.csv"
s=requests.get(url).content
data_all=pd.read_csv(io.StringIO(s.decode('utf-8')))
df0 = data_all.rename(columns = {'日付': 'Date',
'都道府県コード': 'Prefecture_ID',
'都道府県名':'Name',
'各地の感染者数_1日ごとの発表数':'Daily_Confirmed_Cases',
'各地の感染者数_累計':'Daily_Confirmed_Cases_Total',
'各地の死者数_1日ごとの発表数':'Daily_Death',
'各地の死者数_累計':'Death_Total',
'各地の直近1週間の人口10万人あたりの感染者数':'Death_Ratio_of_Population(100k)'}, inplace = False)
# Set data-filter.
# INPUTS START.
Chosen_Col = '6' # in Col_Dict
Chosen_Prefecture_ID = [1,13,47] # Cho0se 3 in Prefecture_Dict
# INPUTS END.
# Inputs sources.
# Make columm names as dict data-type. Set ONE of them to analyze.
Col_Dict = {'0':'Prefecture_ID', '1':'Name', '2':'Daily_Confirmed_Cases', '3':'Daily_Confirmed_Cases_Total',
'4':'Daily_Death', '5':'Death_Total', '6':'Death_Ratio_of_Population(100k)'}
# Prefecture_Dict
Prefecture_Dict = {'1': 'Hokkaido','2': 'Aomori','3': 'Iwate','4': 'Miyagi','5': 'Akita','6': 'Yamagata','7': 'Fukushima',
'8': 'Ibaraki','9': 'Tochigi','10': 'Gunma','11': 'Saitama','12': 'Chiba','13': 'Tokyo','14': 'Kanagawa',
'15': 'Niggata','16': 'Toyama','17': 'Ishikawa','18': 'Fukui','19': 'Yamanashi','20': 'Nagano',
'21': 'Gifu','22': 'Shizuoka','23': 'Aichi','24': 'Mie','25': 'Shiga','26': 'Kyoto','27': 'Osaka',
'28': 'Hyogo','29': 'Nara','30': 'Wakayama','31': 'Totori','32': 'Shimane','33': 'Okayama','34': 'Hiroshima',
'35': 'Yamaguchi','36':'Tokushima','37': 'Kagawa','38': 'Ehime','39': 'Kochi','40': 'Fukuoka',
'41': 'Saga','42': 'Nagasaki','43': 'Kumamoto','44': 'Oita','45': 'Miyazaki','46': 'Kagoshima','47': 'Okinawa'}
# Filter data to plot and save.
# Pandas 查询筛选数据: https://www.gairuo.com/p/pandas-selecting-data
# Pandas filter 筛选标签: https://www.gairuo.com/p/pandas-filter
# Ref: https://qiita.com/FukuharaYohei/items/5b739a5ceb43d25aa2cd
# ref: https://www.cxyzjd.com/article/yeziand01/94412056
df00 = df0.loc[df0['Prefecture_ID'].isin(Chosen_Prefecture_ID),['Date','Prefecture_ID',Col_Dict[Chosen_Col]]]
df01 = pd.DataFrame(index=[], columns=[])
df_tmp = | pd.DataFrame(index=[], columns=[]) | pandas.DataFrame |
import coloredlogs
import datetime
import errno
import ipaddress
import logging
import maxminddb
import os
from numpy import source
import pandas as pd
import getpass
import pyesedb as esedb
import sqlite3
import sys
import traceback
import uuid
import binascii
import struct
import time
from argparse import ArgumentParser
from configparser import ConfigParser
from datetime import datetime, timedelta
from binascii import unhexlify
from pandas.core.frame import DataFrame
from pandas.io.parsers import ParserError
from struct import unpack
__author__ = '<NAME>'
__version__ = '20211106'
__credit__ = 'Inspired by BriMor Labs/KStrike'
"""
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Requirements:
* Python3
* Colorlogs (pip install colorlog)
* maxminddb (pip install maxminddb)
* Pandas (pip install pandas)
* libesedb (pyesedb) (compile from source: https://github.com/libyal/libesedb) (pip install libesedb-python failed for 3.8 and 3.9, YMMVH)
* GeoLite2-City.mmdb (https://www.maxmind.com)
Artifact References:
https://www.crowdstrike.com/blog/user-access-logging-ual-overview/
https://advisory.kpmg.us/blog/2021/digital-forensics-incident-response.html
https://en.wikipedia.org/wiki/Extensible_Storage_Engine
"""
class LogClass:
def __init__(self, logname, debug_level=20):
"""
Critical == 50
Error == 40
Warning == 30
Info == 20
Debug == 10
Notset = 0
"""
current_user = getpass.getuser()
# log_format = '%(asctime)s:%(levelname)s:%(message)s'
# date_fmt = '%m/%d/%Y %I:%M:%S'
# logging.basicConfig(filename=logname, format=log_format, level=debug_level, filemode='a', datefmt=date_fmt)
# console = logging.StreamHandler()
# console.setLevel(debug_level)
# formatter = logging.Formatter(log_format)
# console.setFormatter(formatter)
# logging.getLogger('').addHandler(console)
clr_log_format = '%(asctime)s:%(hostname)s:%(programname)s:%(username)s[%(process)d]:%(levelname)s:%(message)s'
coloredlogs.install(level=debug_level, fmt=clr_log_format)
@staticmethod
def log(level='info', message=''):
if level.lower() == 'debug':
logging.debug(message)
if level.lower() == 'info':
logging.info(message)
if level.lower() == 'warning':
logging.warning(message)
if level.lower() == 'error':
logging.error(message)
if level.lower() == 'critical':
logging.critical(message)
class PrepClass:
def __init__(self, raw_output_path):
self.raw_output_path = raw_output_path
self.log_file = os.path.join(self.raw_output_path, 'Script Processing.log')
self.sql_db = os.path.join(self.raw_output_path, 'UAL_DB.sqlite')
self.sql_file = ''
self.db_setup = ''
self.p_log = ''
self.config = ''
def setup_logging(self, debug_mode=False):
log_level = 'info'
if debug_mode:
log_level = 'debug'
numeric_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % log_level)
self.p_log = LogClass(self.log_file, numeric_level)
return self.p_log
def setup_output_directory(self):
if not os.path.exists(self.raw_output_path):
try:
os.makedirs(self.raw_output_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
class UALClass:
def __init__(self, config_dict):
self.config_dict = config_dict
self.source_path = config_dict['raw_input_path']
self.ese_dbs = self.get_ese_files(self.source_path)
self.out_path = config_dict['raw_output_path']
self.maxmind_db = config_dict['maxminddb']
self.ftype = config_dict['ftype']
self.plog = config_dict['p_log']
self.sql_db = os.path.join(self.out_path, 'UAL.db')
self.GUID = list()
self.chained_databases = dict()
self.system_identity = list()
self.series_list = list()
self.chain_db_df = pd.DataFrame()
self.role_ids_df = | pd.DataFrame() | pandas.DataFrame |
import os
import json
import pandas as pd
import zipfile
from werkzeug.utils import secure_filename
import shutil
import time
from random import randint
from datetime import timedelta
import tempfile
import sys
from elasticsearch import Elasticsearch
##
##
# dataframes
from dataframes import dataframe
# functions
from grading_checks import naming, usage, documentation_logging, error_handling
from soft_checks import activity_stats, project_folder_structure, project_structure, template_check, selector_check
from flask import Flask, request, render_template, redirect, url_for, session, jsonify, Response, send_file, make_response
from flask_session import Session
from flask_socketio import SocketIO, emit
from flask_mysqldb import MySQL
import MySQLdb.cursors
from passlib.hash import sha256_crypt
import pdfkit
# es = Elasticsearch(['https://098b8510b627461cb0e77d37d10c4511.us-east-1.aws.found.io:9243'],
# http_auth=('elastic', '<PASSWORD>'))
application = app = Flask(__name__, static_folder='./static/dist', template_folder="./static")
# dont save cache in web browser (updating results image correctly)
app.config["CACHE_TYPE"] = "null"
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config['UPLOAD_PATH'] = '/file/'
app.config['ALLOWED_EXTENSIONS'] = set(['zip'])
app.config['SESSION_TYPE'] = 'filesystem'
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=1)
app.config['SECRET_KEY'] = randint(0,99999999999999999999)
app.config['MYSQL_HOST'] = 'us-sql01.mysql.database.azure.com'
app.config['MYSQL_PORT'] = 3306
app.config['MYSQL_USER'] = 'us-evalsql01@us-sql01'
app.config['MYSQL_PASSWORD'] = '<PASSWORD>'
app.config['MYSQL_DB'] = 'sleipnir'
app.config['APP_ADMIN_RIGHT'] = 'admin'
mysql = MySQL(app)
# Check Configuration section for more details
Session(app)
socketio = SocketIO(app, async_mode="eventlet")
thread = None
######
gexf = ''
df_annotation = []
main_location = ""
dict_score = {}
df_invokeWf = []
pepper = 'zxf98g7yq3whretgih'
######
@app.route("/")
def login():
with app.app_context():
folderPathList = [os.getcwd().replace("\\", "/") + app.config['UPLOAD_PATH'] + path for path in
os.listdir(os.getcwd() + app.config['UPLOAD_PATH'])]
filteredFolderPathList = [path for path in folderPathList if time.time() - os.path.getmtime(path) > 900]
for folder in filteredFolderPathList:
shutil.rmtree(folder, True)
sessionPathList = [os.getcwd().replace("\\", "/") + '/flask_session/' + path for path in
os.listdir(os.getcwd() + '/flask_session/')]
filteredSessionPathList = [path for path in sessionPathList if time.time() - os.path.getmtime(path) > 900]
for ses in filteredSessionPathList:
print(ses)
os.remove(ses)
if session.get("loggedin"):
return redirect(url_for('upload'))
else:
return render_template('login.html')
@app.route("/login", methods=['POST'])
def validate_user():
with app.app_context():
requestData = json.loads(str(request.data, encoding="utf-8"))
tenant = requestData['tenant']
username = requestData['username']
password = requestData['password']
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT id FROM tenants WHERE tenant_name = %s", (tenant,))
tenant_record = cursor.fetchone()
if tenant_record:
tenant_id = tenant_record["id"]
cursor.execute('SELECT id, password FROM users WHERE username = %s AND tenant_id = %s', (username, tenant_id,))
user_record = cursor.fetchone()
if user_record:
user_id = user_record["id"]
storedHashedPassword = user_record["password"]
if sha256_crypt.verify(password+pepper, storedHashedPassword):
resp = jsonify({"result": render_template('fileUpload.html',
username=username,
user_id=user_id)})
session['loggedin'] = True
session['id'] = user_id
session['username'] = username
cursor.close()
return make_response(resp, 200)
else:
cursor.close()
resp = jsonify({"message": "Wrong password"})
return make_response(resp, 400)
else:
cursor.close()
resp = jsonify({"message": "User not exists"})
return make_response(resp, 400)
else:
cursor.close()
resp = jsonify({"message": "Tenant not exists"})
return make_response(resp, 400)
@app.route('/logout')
def logout():
with app.app_context():
session.pop('loggedin', None)
session.pop('id', None)
session.pop('username', None)
return redirect(url_for('login'))
@app.route("/admin", methods=['GET'])
def admin():
with app.app_context():
if session.get('loggedin'):
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT * FROM users_rights WHERE user_id = %s "
"AND right_id IN (SELECT id FROM rights WHERE `right` = %s);",
(int(session.get('id')), app.config['APP_ADMIN_RIGHT'],))
if cursor.fetchone():
cursor.close()
return render_template('adminPanel.html',
username=session.get('username'),
user_id=session.get('id'))
else:
cursor.close()
return "Not Authorized"
else:
return "Please login first"
@app.route('/upload')
def upload():
with app.app_context():
folderPathList = [os.getcwd().replace("\\", "/") + app.config['UPLOAD_PATH'] + path for path in os.listdir(os.getcwd() + app.config['UPLOAD_PATH'])]
filteredFolderPathList = [path for path in folderPathList if time.time() - os.path.getmtime(path) > 900]
for folder in filteredFolderPathList:
shutil.rmtree(folder, True)
if session.get('loggedin'):
return render_template('fileUpload.html',
username=session.get('username'),
user_id=session.get('id'))
else:
return "Please login first"
def background_thread():
while True:
socketio.emit('message', {'alive': "Alive"})
socketio.sleep(60)
@socketio.on('connect')
def connect():
global thread
if thread is None:
thread = socketio.start_background_task(target=background_thread)
@app.route("/processing", methods=["POST"])
def processing():
requestData=json.loads(str(request.data, encoding="utf-8"))
# Get related info from Project.json (name and description)
folderPath = session.get('folderPath')
fileLocationStr = session.get('fileLocationStr')
files = session.get('files')
df_json = documentation_logging.grade_project_json_name_desc(folderPath)
df_json_exp = pd.DataFrame(df_json.subfiles.tolist(), index=df_json['index']).stack().reset_index()
df_json_exp.columns = ['projectId', 'fileIndex', 'filePath']
lst_name = []
df_json['projectName'] = df_json.apply(lambda x: x['projectDetail']['projectName'], axis=1)
for name in list(df_json['projectName']):
if name not in lst_name:
lst_name.append(name)
else:
count = 2
dup_name = name + '_' + str(count)
while dup_name in lst_name:
count += 1
dup_name = name + '_' + str(count)
lst_name.append(dup_name)
df_json['projectName'] = lst_name
df_json_exp = pd.merge(df_json_exp, df_json.loc[:, ["mainFolder", 'projectName']].reset_index(), how="left",
left_on="projectId", right_on="index")
df_json_exp.drop(columns=['fileIndex', "index"], inplace=True)
if requestData["setting"]["jsonLog"]:
project_detail = list(df_json.copy().reset_index().loc[:, ['index', 'projectDetail', 'projectName']]
.T.to_dict().values())
json_name_score = df_json.namingScore.sum() / len(df_json.namingScore)
json_description_score = df_json.descriptionScore.sum() / len(df_json.descriptionScore)
else:
json_name_score = "[Not evaluated]"
json_description_score = "[Not evaluated]"
project_detail = ['Not evaluated']
# scans all project files and populates dataframes with relevant info
socketio.emit('progress', {'data': 'Processing Files ...'})
socketio.sleep(0.1)
lst_sub_df = [dataframe.populate_dataframe(files[i], df_json) for i in range(len(files))]
df_variable = pd.merge(pd.concat([x[0] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
df_argument = pd.merge(pd.concat([x[1] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
df_catches = pd.merge(pd.concat([x[2] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
df_activity = pd.merge(pd.concat([x[3] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
df_annotation = pd.concat([x[4] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False)
df_selector = pd.merge(pd.concat([x[5] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
dict_score = {}
# level 1: grading checks
# level 2: name
# if session.get("naming"):
if True:
# level 3: variable naming
if requestData["setting"]['varName']:
[variableNamingScore, improperNamedVariable] = naming.grade_variable_name(df_variable=df_variable)
improperNamedVar = improperNamedVariable
else:
improperNamedVar = pd.DataFrame(columns=['name', 'file', 'type', 'error', 'project'])
variableNamingScore = "[Not evaluated]"
# level 3: argument naming
if requestData["setting"]['argName']:
[argumentNamingScore, improperNamedArguments] = naming.grade_argument_name(df_argument=df_argument)
improperNamedArg = improperNamedArguments
else:
improperNamedArg = pd.DataFrame(columns=['name', 'file', 'type', 'error', 'project'])
argumentNamingScore = "[Not evaluated]"
# level 3: activity naming
if requestData["setting"]['actName']:
[activityNamingScore, improperNamedActivities] = naming.grade_activity_name(df_activity=df_activity)
improperNamedAct = improperNamedActivities
else:
improperNamedAct = pd.DataFrame(columns=['name', 'file', 'type', 'error', 'project'])
activityNamingScore = "[Not evaluated]"
lt_namingScore = [variableNamingScore, argumentNamingScore, activityNamingScore]
namingScore = 0
count = 0
for i in lt_namingScore:
if i != "[Not evaluated]":
namingScore += i
count += 1
if count==0:
namingScore = "[Not evaluated]"
else:
namingScore = int(namingScore / count)
else:
improperNamedVar = | pd.DataFrame(columns=['name', 'file', 'type', 'error', 'project']) | pandas.DataFrame |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00'
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00'
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00'
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = | pd.period_range('2013Q1', periods=1, freq="Q") | pandas.period_range |
import datetime
import os
import time
import pandas as pd
import requests
HOST = 'https://wsn.latice.eu'
#HOST = 'http://localhost:8000' # For the developer
# Prepare the session
TOKEN = os.getenv('WSN_TOKEN') # export WSN_TOKEN=xxx
session = requests.Session()
session.headers.update({'Authorization': f'Token {TOKEN}'})
def query(
db, # postgresql or clickhouse
table=None, # clickhouse table name
fields=None, # Fields to return: all by default
tags=None, # postgresql metadata fields (none by default)
time__gte=None, time__lte=None, # Time range (sampled)
received__gte=None, received__lte=None, # Time range (received)
limit=100, # Limit
interval=None, interval_agg=None, # Aggregates
format='pandas', # pandas or json
time_index=True, # Return pandas dataframe with time as index
# (only valid if format 'pandas' is selected)
debug=False,
**kw # postgresql filters (name, serial, ...)
):
"""
query('clickhouse', table='', ...) -> dataframe or dict
query('postgresql', ...) -> dataframe or dict
Retrieves data from the UiO Django system.
Selecting data (rows)
=====================
First choose the database to pull data from, choices are
clickhouse (for raw data from finse/mobile flux), and postgresql (for
everything else). How data is selected depends on the database used.
ClickHouse:
query('clickhouse', table='finseflux_Biomet', ...)
Choices for table are: finseflux_Biomet, finseflux_StationStatus,
mobileflux_Biomet and mobileflux_StationStatus.
PostgreSQL:
query('postgresql', name='eddypro_Finseflux', ...)
query('postgresql', serial=0x1F566F057C105487, ...)
query('postgresql', source_addr_long=0x0013A2004105D4B6, ...)
Data from PostgreSQL can be queried by any metadata information, most often
the name is all you need.
Selecting fields (columns)
==========================
If the fields parameter is not given, all the fields will be returned. This
is only recommended to explore the available columns, because it may be too
slow and add a lot of work on the servers. So it is recommended to ask only
for the fields you need, it will be much faster.
Examples:
query('clickhouse', table='finseflux_Biomet', fields=['LWIN_6_14_1_1_1', 'LWOUT_6_15_1_1_1'], ...)
query('postgresql', name='eddypro_Finseflux', fields=['co2_flux'], ...)
The field 'time' is always included, do not specify it. It's a Unix
timestamp (seconds since the Unix epoch). The rows returned are ordered by
this field.
Selecting a time range
==========================
Use the parameters time__gte and/or time__lte to define the time range of
interest. The smaller the faster the query will run.
These parameters expect a datetime object. If the timezone is not specified
it will be interpreted as local time, but it's probably better to
explicitely use UTC.
Example:
query(
'clickhouse', table='finseflux_Biomet',
fields=['LWIN_6_14_1_1_1', 'LWOUT_6_15_1_1_1'],
time__gte=datetime.datetime(2018, 3, 1, tzinfo=datetime.timezone.utc),
time__lte=datetime.datetime(2018, 4, 1, tzinfo=datetime.timezone.utc),
...
)
NOTE: 'time' is the sampled time, when the value was recorded by the data
logger. It's also possible to filter by the received time, when the gateway
received the data frame. For that purpose use `received__gte` and
`received__lte`. Note that not all frames have this information, for
example those that have been uploaded to the server from the SD card.
Limiting the number of rows
===========================
The limit parameter defines the maximum number of rows to return. If not
given all selected rows will be returned.
Example:
query(
'clickhouse', table='finseflux_Biomet',
fields=['LWIN_6_14_1_1_1', 'LWOUT_6_15_1_1_1'],
time__gte=datetime.datetime(2018, 3, 1, tzinfo=datetime.timezone.utc),
time__lte=datetime.datetime(2018, 4, 1, tzinfo=datetime.timezone.utc),
limit=1000000,
)
Intervals and aggregates
===========================
Instead of returning every data point, it's possible split the time range
in intervals and return only one row per interval. This can greatly reduce
the amount of data returned, speeding up the query.
For this purpose pass the interval parameter, which defines the interval
size in seconds. The interval is left-closed and right-open.
By default the first row found within the interval will be returned. The
time column will be that of the first row. For example, if the interval
is 1 hour and the first row is at :05 then the time column will be :05
If the interval_agg parameter is passed, then an aggregate for every
column within the interval will be returned. The time column will be
the beginning of the interval. For example if the interval is 1h, the
time column will be :00
Example (interval size is 5 minutes):
query(
'clickhouse', table='finseflux_Biomet',
fields=['LWIN_6_14_1_1_1', 'LWOUT_6_15_1_1_1'],
time__gte=datetime.datetime(2018, 3, 1, tzinfo=datetime.timezone.utc),
time__lte=datetime.datetime(2018, 4, 1, tzinfo=datetime.timezone.utc),
limit=1000000,
interval=60*5,
interval_agg='avg',
)
If using postgresql the available functions are: avg, count, max, min,
stddev, sum and variance.
If using clickhouse any aggregate function supported by ClickHouse can be
used, see https://clickhouse-docs.readthedocs.io/en/latest/agg_functions/
Tags (PostgreSQL only)
===========================
With PostgreSQL only, you can pass the tags parameter to add metadata
information to every row.
Example:
query(
'postgresql', name='fw-001',
fields=['latitude', 'longitude'],
tags=['serial'],
)
In this example, the data from fw-001 may actually come from different
devices, maybe the device was replaced at some point. Using the tags
parameter we can add a column with the serial number of the devices.
Tags don't work with aggregated values.
Returns
===========================
This function returns by default a Pandas dataframe. Use format='json' to
return instead a Python dictionary, with the data as was sent by the server.
Debugging
===========================
With debug=True this function will print some information, useful for
testing. Default is False.
"""
t0 = time.perf_counter()
url = HOST + f'/api/query/{db}/'
# Parameters
to_timestamp = lambda x: int(x.timestamp()) if x else x
time__gte = to_timestamp(time__gte)
time__lte = to_timestamp(time__lte)
received__gte = to_timestamp(received__gte)
received__lte = to_timestamp(received__lte)
if interval_agg == 'mean':
interval_agg = 'avg'
params = {
'table': table,
'fields': fields,
'tags': tags,
'time__gte': time__gte, 'time__lte': time__lte,
'received__gte': received__gte, 'received__lte': received__lte,
'limit': limit,
'interval': interval, 'interval_agg': interval_agg,
}
# Filter inside json
for key, value in kw.items():
if value is None:
params[key] = None
continue
if type(value) is datetime.datetime:
value = int(value.timestamp())
if isinstance(value, int):
key += ':int'
params[key] = value
# Query
response = session.get(url, params=params)
response.raise_for_status()
json = response.json()
data = json
if format == 'pandas':
if data['format'] == 'sparse':
data = pd.json_normalize(data['rows'])
else:
data = | pd.DataFrame(data['rows'], columns=data['columns']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: andreypoletaev
"""
# =============================================================================
# %% Block 1: initial imports
# =============================================================================
import os, sys, re, glob
if os.path.join(os.path.abspath(os.getcwd()), "utils") not in sys.path :
sys.path.append(os.path.join(os.path.abspath(os.getcwd()), "utils"))
import numpy as np
import pandas as pd
import hop_utils as hu
from crystal_utils import read_lmp
from scipy.optimize import curve_fit as cf
from scipy.interpolate import interp1d
from datetime import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Rectangle
from batlow import cm_data
batlow_cm = LinearSegmentedColormap.from_list('batlow', cm_data)
batlow_even = LinearSegmentedColormap.from_list('batlow_even', hu.batlow_lightness_scaled(0.4,0.6))
from cycler import cycler
linecycler = cycler(linestyle=['-', '--', ':', '-.'])
markcycler = cycler(marker=['o', 's', 'v', 'd', '^'])
from itertools import cycle
markers = cycle(['o', 's', 'v', 'd', '^','D','<','>'])
lines = cycle(['-', '--', '-.', ':'])
## linear fitting
linfit = lambda x, *p : p[0] * x + p[1]
## cosmetic defaults for matplotlib plotting
plt.rc('legend', fontsize=10)
plt.rc('axes', labelsize=14)
plt.rc('axes', titlesize=14)
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=12)
plt.rc('errorbar', capsize=3)
plt.rc('markers', fillstyle='none')
plt.rc("savefig", format='pdf')
## variables by which it is possible to plot
relevant_vars = ['metal','phase','T1','config','stoich','exclude','z']
## which atoms to query for species
## conductivity from bulk diffusion coefficient. Takes D_bulk [cm^2/sec], cell [AA]
## output is [Kelvin/ohm/cm] i.e. [Kelvin * siemens / cm]
## note that there is only one q in the formula because hu.kb is [eV/K]
q = 1.602e-19 ## [Coulomb] elementary charge
AA = 1e-8 ## [cm] 1 angstrom in cm
sigma_T = lambda N, cell, d_com : q * N / np.prod(np.diag(cell*AA))*d_com / hu.kb
unit_conv = 1e-4 ## [cm^2/sec] 1 AA^2/psec = 0.0001 cm^2/sec. No need to change this.
eps_0 = 8.854187e-12 ## [A^2 m^-3 kg^-1 sec^4]
T1 = 300
## dictionary of units
units = {'T1':'K', 'metal':'', 'stoich':'', 'exclude':'', 'config':'', 'z':'',
'phase':f' {T1}K'}
## shorthands for labels
bdp = r'$\beta^{\prime\prime}$'
beta = r'$\beta$'
phases = {'beta':beta, 'bdp':bdp}
# =============================================================================
# %% Block 2 : load files based on the index of conduction planes created in
# ## analysis_steadystate.py
# ## The a2_...fix files are assumed to be located in the same folders as their
# ## corresponding lammps structure files.
# =============================================================================
## database of all the hops: only combined planes matter for macro analyses.
all_planes = pd.read_csv('./sample_data/all_hop_planes.csv').query('z == "z_all"')
## flag for loading atoms
frac = False
## flag for loading CoM immediately
load_com = False
## flag for loading the output of the LAMMPS msd fix
load_r2 = False
## ===== BETA single-metal =====
## ===== BETA Ag =====
planes_to_load = all_planes.query('metal == "Ag" & config == "120_4" & T1 in [300,600,1000]')
## ===== BETA Na =====
# planes_to_load = all_planes.query('metal == "Na" & phase == "beta" & stoich == "120" ')
# planes_to_load = all_planes.query('metal == "Na" & phase == "beta" & stoich == "120" & T1 == 300')
# planes_to_load = all_planes.query('metal == "Na" & phase == "beta" & stoich == "120" & T1 == 600')
# planes_to_load = all_planes.query('metal == "Na" & phase == "beta" & config == "120_4" & T1 in [300,600,1000]')
# planes_to_load = all_planes.query('metal == "Na" & phase == "beta" & config == "120_4" & T1 in [300,600]')
# planes_to_load = all_planes.query('metal == "Na" & phase == "beta" & config == "120_4" & T1 == 300')
# planes_to_load = all_planes.query('metal == "Na" & phase == "beta" & config == "120_1"')
## ===== BETA K =====
# planes_to_load = all_planes.query('metal == "K" & stoich == "120" & 300 < T1 < 900')
# planes_to_load = all_planes.query('metal == "K" & stoich == "120" & T1 in [300, 600]')
# planes_to_load = all_planes.query('metal == "K" & config == "120_4"')
# planes_to_load = all_planes.query('metal == "K" & config == "120_4" & T1 in [300,600,1000]')
## ===== BETA all metals together =====
# planes_to_load = all_planes.query('phase == "beta" & config == "120_4" & T1 == 1000')
# planes_to_load = all_planes.query('phase == "beta" & config == "120_4" & T1 == 600')
# planes_to_load = all_planes.query('phase == "beta" & config == "120_4" & T1 in [300,600,1000] ')
## ===== BDP =====
## ===== BDP Na =====
planes_to_load = all_planes.query('phase != "beta" & metal == "Na" & config == "unsym_0" & T1 in [230,300,473,600]')
# planes_to_load = all_planes.query('phase != "beta" & metal == "Na" & config == "unsym_0" & T1 in [230,300]')
# planes_to_load = all_planes.query('phase != "beta" & metal == "Na" & config == "unsym_0" & T1 in [300,473,600]')
# planes_to_load = all_planes.query('phase != "beta" & metal == "Na" & T1 == 600')
# planes_to_load = all_planes.query('phase != "beta" & metal == "Na" & stoich in ["unsym", "unsymLi"] & T1 in [230,300,473]')
# planes_to_load.sort_values('config', ascending=False, inplace=True)
## ===== BDP K =====
# planes_to_load = all_planes.query('phase != "beta" & metal == "K"')
# planes_to_load = all_planes.query('phase != "beta" & metal == "K" & config == "symm_1"')
# planes_to_load = all_planes.query('phase != "beta" & metal == "K" & config == "unsym_0"')
# planes_to_load = all_planes.query('phase != "beta" & metal == "K" & config == "unsym_0" & T1 in [300,600]')
# planes_to_load = all_planes.query('phase != "beta" & metal == "K" & config == "unsym_0" & T1 == 300')
# planes_to_load = all_planes.query('phase != "beta" & metal == "K" & config == "unsym_0" & T1 == 600')
# planes_to_load.sort_values('config', ascending=False, inplace=True)
## ===== BDP Ag =====
# planes_to_load = all_planes.query('phase != "beta" & metal == "Ag"')
# planes_to_load = all_planes.query('phase != "beta" & metal == "Ag" & config == "symm_1"')
# planes_to_load = all_planes.query('phase != "beta" & metal == "Ag" & config == "unsym_0"')
# planes_to_load.sort_values('config', ascending=False, inplace=True)
## ===== BDP all metals together =====
# planes_to_load = all_planes.query('phase != "beta" & num_planes > 2 & config == "symm_1" & T1 == 600')
# planes_to_load = all_planes.query('phase != "beta" & num_planes > 2 & config == "symm_1" & T1 in [230,300,473,600]')
# planes_to_load = all_planes.query('phase != "beta" & num_planes > 2 & config == "unsym_0" & T1 == 300')
# planes_to_load = all_planes.query('phase != "beta" & num_planes > 2 & config == "unsym_0" & T1 == 600')
# planes_to_load = all_planes.query('phase != "beta" & num_planes > 2 & config == "unsym_0" & T1 in [300,600]')
# planes_to_load = all_planes.query('phase != "beta" & num_planes > 2 & config == "unsym_0" & T1 in [230,300,473,600]')
# planes_to_load = all_planes.query('phase != "beta" & metal in ["Na", "K"] & T1 in [230,300,473,600]')
# planes_to_load = all_planes.query('phase != "beta" & T1 in [230,300,473,600]')
# planes_to_load = all_planes.query('phase != "beta" & config == "unsym_0" & metal in ["Ag", "K"] & T1 in [230,300,473,600]')
# planes_to_load.sort_values('config', ascending=False, inplace=True)
## ===== both beta and doubleprime =====
# planes_to_load = all_planes.query('metal == "Na" & T1 == 300 & config in ["120_4", "unsym_0", "symm_1", "102_1"]')
# ========== automatic things below this line ==========
## make a structure for loading data
planes_dicts = []
## load macro-analysis files from the lammps non-Gaussian compute
for plane in planes_to_load.itertuples(index=False):
mm = plane.metal
T1 = plane.T1
hp = plane.hop_path
ph = plane.phase
st = plane.stoich
ex = plane.exclude
tt = plane.total_time
cn = plane.config
## load lammps structure
_, _, cell, atoms = read_lmp(plane.lammps_path, fractional=False)
a2_folder = '/'.join(plane.lammps_path.split('/')[:-1])
## load lammps r2 file for the diffusion coefficient
if load_r2 :
r2_fname = glob.glob(a2_folder+f'/a2_*{T1}K-{mm}.fix')
## load the r2 file if exactly one exists, else complain
if isinstance(r2_fname, list) and len(r2_fname) == 1:
## read the r2 file - options for fix file
this_r2 = pd.read_csv(r2_fname[0], names=['time','r2','r4','a2'],
skiprows=2, sep=' ')
this_r2.time /= 1000
this_r2.set_index('time', inplace=True)
## Look for a literature folder
lit_folder = '/'.join(a2_folder.split('/')[:-1])
print(f'\nLoaded r2 for plane {hp}')
else:
print(f'\nsomething off with plane {hp}.')
print(f'here are possible r2 outputs: {r2_fname}')
this_r2 = None
else : this_r2 = None
## the a2 fix file is LAMMPS output, csv is calculated with multiple starts
## this takes the longest-duration a2 file
a2_fnames = glob.glob(a2_folder+f'/{mm}*a2-*{T1}K*ps.csv')
## load the a2 file if exactly one exists, else complain
if a2_fnames :
if len(a2_fnames) > 1 : a2_fnames = sorted(a2_fnames, reverse=True,
key = lambda x : eval(re.split('-|_| ',x)[-1][:-6]))
# ## read the a2 file - options for fix file
# this_a2 = pd.read_csv(a2_fname[0], names=['time','r2','r4','a2'],
# skiprows=2, sep=' ')
# this_a2.time /= 1000
## read the a2 file - options for csv file
this_a2 = pd.read_csv(a2_fnames[0], sep=',').set_index('time')
## Look for a literature folder
lit_folder = '/'.join(a2_folder.split('/')[:-1])
print(f'Loaded a2: {a2_fnames[0]}')
else:
print(f'something off with plane {hp}.')
print(f'here are possible a2 outputs: {a2_fnames}')
this_a2 = None
## load the CoM trajectory if it exists
com_fname = glob.glob(a2_folder + f'/cm*{T1}K*{mm}.fix')
if isinstance(com_fname, list) and len(com_fname) == 1 and load_com:
this_com = pd.read_csv(com_fname[0],sep=' ', names=['time', 'x', 'y', 'z', 'vx', 'vy', 'vz'], skiprows=2).drop(columns=['vx','vy','vz'])
this_com.time /= 1000. ## hard-coded conversion from steps to picoseconds
this_com.set_index('time', inplace=True)
print('Loaded CoM trajectory.')
elif not load_com :
this_com = True
print('Skipping CoM trajectory.')
else :
print(f'Could not load CoM trajectory, found: {com_fname}')
this_com = None
## wrap the a2, CoM, and metadata into a dict
if (this_r2 is not None or not load_r2) and (this_a2 is not None) :
# if (this_a2 is not None) and (this_r2 is not None) and (this_com is not None) :
planes_dicts.append(dict(phase=ph, metal=mm, T1=T1, config=cn, stoich=st, exclude=ex,
a2=this_a2, lit_folder=lit_folder, com = this_com,
cell=cell, atoms=atoms, folder=a2_folder, r2=this_r2))
## make the holding structure into a dataframe
macro_planes_data = pd.DataFrame(planes_dicts)
# =============================================================================
# %% Figure 2 (and Extended Data 1-6) : (a) r2, (b) exponent of r2 vs distance,
# ## (c) dx raw, (d) dx rescaled,(e) Gs 2D color plot, (f) Gs fitting.
# ## Version "04", March 29 2021, this is in manuscript versions 07-09
# =============================================================================
## parameters:
## THIS RELIES ON LOADING PLANES ABOVE
dim = 2 ## dimension for r2, typically just 2, but 3 is possible.
guides = True ## plot guidelines in (a)
hop_length = 2.8 ## [AA] for binning dx
dx_times = [25, 25e1, 25e2, 25e3] ## time points for spectra of dx
gs_times = 2.5*np.logspace(-1,4,6) ## [ps] times for plotting spectra
rs_list = [[0.01,1.7], [0.01, 4.6]] ## use 1.7/4.6 for bdp, 1.6/4.3 for beta?
T1_dx = 300 ## for which temperature to plot dx
T1s_gs = [300,600] ## for which temperatures to plot Gs fits
cd_exclude = []
na_bdp_unsym = True ## trigger for making a broken axis for C_D(t) (Figure 2)
# ========== automatic things below this line ==========
## parameters to transform the C_D and create the broken axes
cd_break = 4.25
cd_break_top = 4.6
cd_scale = 2. # linear scaling factor; if > 1 makes transform look compressed
cd_true = 9 # value that will show up as cd_display
cd_display = 6.8 # value at which in
# linear function that calculates an underlying transformed coordinate
# given y points from real data.
# maps cd_true to cd_display; true y-value at cd_true shows up as at cd_display
cd_transform = lambda y : cd_display + (y - cd_true) / cd_scale
## new figure & counter for Gs fitting colors
fig, axes = plt.subplots(3,2, figsize=(10,12))
## make a color map for all temperature values
T1_colors = cycle([batlow_even(i) for i in np.linspace(0, 1, len(macro_planes_data.T1.unique()))])
dx_colors = cycle([batlow_even(i) for i in np.linspace(0, 1, len(dx_times))])
## one figure total
for i, plane in macro_planes_data.iterrows():
mm = plane.metal; st = plane.stoich; cn = plane.config; ph = plane.phase
ex = plane.exclude; T1 = plane.T1; folder = plane.folder
cc = next(T1_colors)
## set a legend title, and plotting labels for each curve
label = str(T1) + 'K'
leg_title = f'{mm} {phases[ph]}'
## load the 2D a2 file - leaving out the "split" files
if dim == 2 :
a2_xys = glob.glob(folder+f'/{mm}*{ex}-a2xy*{T1}K*ps.csv')
## load the a2 file if exactly one exists, else complain
if a2_xys :
if len(a2_xys) > 1 : a2_xys = sorted(a2_xys, reverse=True,
key = lambda x : eval(re.split('-|_| ',x)[-1][:-6]))
## read the a2 file - options for csv file
a2 = pd.read_csv(a2_xys[0], sep=',').set_index('time')
else : print(f'could not load a 2D a2 file for plane {mm} {cn} {T1}K')
else : a2 = plane.a2
## recalculate a2 for the right number of dimensions
a2.a2 = dim * a2.r4 / a2.r2 ** 2 / (dim+2) - 1
## load a short-time a2 file if using one
try :
a2s = pd.read_csv(folder + f'/{mm}-{st}-{ex}-a2{"xy" if dim == 2 else ""}-{T1}K-10ps.csv').set_index('time')
a2s.a2 = dim * a2s.r4 / a2s.r2 ** 2 / (dim+2) - 1
except : a2s = None
# ===== (a) r2 =====
axes[0,0].plot(a2.r2.iloc[1:], label=label, c=cc)
axes[0,0].legend(title=leg_title)
if guides and T1 == T1_dx :
if mm == 'Na' and 'unsym' in cn :
axes[0,0].plot([0.03,0.03*5], [0.07,0.07*25], c='k', lw=0.4)
axes[0,0].plot([15e2, 15e3], [2e3, 2e4], c='k', lw=0.4)
axes[0,0].plot([4e3, 4e4], [4, 4*10**0.75], c='k', lw=0.4)
elif mm == 'Na' and 'symm' in cn :
axes[0,0].plot([15e2, 15e3], [2e3, 2e4], c='k', lw=0.4)
axes[0,0].plot([25e2, 25e3], [2e1, 2e2], c='k', lw=0.4)
elif mm == 'Na' and '120_4' in cn :
axes[0,0].plot([0.03,0.03*5], [0.12,0.12*25], c='k', lw=0.4)
axes[0,0].plot([15e2, 15e3], [1.5e3, 1.5e4], c='k', lw=0.4)
axes[0,0].plot([2.5e3, 2.5e4], [5, 5*10**0.8], c='k', lw=0.4)
elif mm == 'K' and 'unsym' in cn :
axes[0,0].plot([0.03,0.03*5], [0.04,0.04*25], c='k', lw=0.4)
axes[0,0].plot([2e3, 2e4], [1.5e3, 1.5e4], c='k', lw=0.4)
axes[0,0].plot([2.5e3, 2.5e4], [7.5, 7.5*10**0.9], c='k', lw=0.4)
elif mm == 'Ag' and '120' in cn :
axes[0,0].plot([0.03,0.03*5], [0.03,0.03*25], c='k', lw=0.4)
axes[0,0].plot([3e3, 3e4], [2e3, 2e4], c='k', lw=0.4)
axes[0,0].plot([3.5e3, 3.5e4], [6, 6*10**0.75], c='k', lw=0.4)
elif mm == 'Ag' and 'unsym' in cn :
axes[0,0].plot([0.03,0.03*5], [0.02,0.02*25], c='k', lw=0.4)
axes[0,0].plot([2.5e3, 2.5e4], [1.5e3, 1.5e4], c='k', lw=0.4)
# axes[0,0].plot([2.5e3, 2.5e4], [5, 5*10**0.9], c='k', lw=0.4)
axes[0,0].plot([4e3, 4e4], [1.5, 1.5*10**0.75], c='k', lw=0.4)
elif mm == 'K' and '120' in cn :
axes[0,0].plot([0.03,0.03*5], [0.06,0.06*25], c='k', lw=0.4)
axes[0,0].plot([2e3, 4e4], [300, 300*20**0.9], c='k', lw=0.4)
axes[0,0].plot([0.04,10], [31.36, 31.36], c='k', lw=0.4, ls='--')
# ===== (b) exponent vs distance =====
fit_points = 21
p0 = [1, 0]
exp_alpha = np.array([cf(linfit, np.log10(a2.index.values[x:x+fit_points]),
np.log10(a2.r2.values[x:x+fit_points]),p0)[0][0] for x in range(10,len(a2)-fit_points)])
exp_times = a2.index.values[10+fit_points//2:-fit_points//2]
exp_rs = np.sqrt(a2.r2.values[10+fit_points//2:-fit_points//2])
# axes[0,1].plot(exp_times[exp_times >=0.8], exp_alpha[exp_times >=0.8], label=label, c=f'C{i}')
axes[0,1].plot(exp_rs[exp_times >=0.8], exp_alpha[exp_times >=0.8], label=label, c=cc)
## always plot short
try :
a2s = a2s.loc[:0.8]
exp_alpha = np.array([cf(linfit, np.log10(a2s.index.values[x:x+fit_points]),
np.log10(a2s.r2.values[x:x+fit_points]),p0)[0][0] for x in range(1,len(a2s)-fit_points)])
exp_times = a2s.index.values[1+fit_points//2:-fit_points//2]
exp_rs = np.sqrt(a2s.r2.values[1+fit_points//2:-fit_points//2])
# axes[0,1].plot(exp_times, exp_alpha, c=f'C{i}', ls='--')
axes[0,1].plot(exp_rs[exp_times <=0.8], exp_alpha[exp_times <=0.8], c=cc, ls='--')
except: pass
print(f'computed the exponent of MSD vs time for {mm} {cn} {T1}.')
axes[0,1].legend(title=leg_title, loc='lower right')
if guides:
axes[0,1].plot([0.03,3e5],[1,1], c='grey', lw=0.4, ls='--')
axes[0,1].plot([5.6,5.6],[0,2.1], c='grey', lw=0.4, ls='--')
# ===== (c) dx prep, and dx raw =====
if T1 == T1_dx:
## try loading a pre-computed dx file
dx_glob = glob.glob(plane.folder+f'/{mm}-*-dx-{T1}K*ps.csv')
dx = None
try:
dx = pd.read_csv(dx_glob[0])
dx = dx.set_index(['dx','time']).unstack().apply(lambda col: col/col.sum(), axis=0)
dx.columns = [x[1] for x in dx.columns]
except:
print(f'could not load a dx file for {mm} {cn} {T1}K')
continue
## apply binning by time intervals
time_tuples = [ (round(x*0.8), round(x*1.2)) for x in dx_times]
time_intervals = pd.IntervalIndex.from_tuples(time_tuples)
time_spectra = dx.T.groupby(pd.cut(dx.T.index,time_intervals)).agg('mean').T
## normalize each column to sum to 1
time_spectra = time_spectra / time_spectra.sum() / (time_spectra.index[1]-time_spectra.index[0])
## and rename the columns as something legible
col_names = [f'{x[0]}-{x[1]} ps' if max(x) < 1000 else f'{int(x[0])//1000}-{int(x[1])//1000} ns' for x in time_tuples]
time_spectra.rename(columns = dict(zip(time_spectra.columns,col_names)), inplace=True)
## plot each column
for col in time_spectra.columns :
xvals = time_spectra.loc[time_spectra[col] != 0].index
axes[1,0].plot(xvals, time_spectra.loc[time_spectra[col] != 0, col],
label=col, c=next(dx_colors))
axes[1,0].legend(title=leg_title + f' {T1}K')
# ===== (d) dx binned by hops and rescaled =====
if T1 == T1_dx :
## find the variances in dx to later rescale by them
col_sigmas = list()
for col, t in zip(time_spectra.columns, dx_times):
col_variance = time_spectra[col] * time_spectra.index.values **2 / time_spectra[col].sum()
col_sigma = np.sqrt(col_variance.sum())
print(f'{mm} {cn} {T1}K : {t} ps, sigma = {col_sigma:.2f} AA')
col_sigmas.append(col_sigma)
## numbers of hops from the Cartesian displacements
x_bins = (np.unique(dx.index.values // hop_length) * 2 - 1 ) * (hop_length / 2)
x_bins = np.insert(np.append(x_bins,max(x_bins)+hop_length), 0, min(x_bins)-hop_length)
## apply binning by number of hops
time_spectra = time_spectra.groupby(pd.cut(time_spectra.index,x_bins)).agg('sum')
time_spectra.index = (x_bins[:-1] + x_bins[1:])/2
## normalize each column to sum to 1
time_spectra = time_spectra / time_spectra.sum() / (time_spectra.index[1]-time_spectra.index[0])
## plot each column
for col, sigma in zip(time_spectra.columns, col_sigmas) :
xvals = time_spectra.loc[time_spectra[col] != 0].index
axes[1,1].plot(xvals/sigma,
time_spectra.loc[time_spectra[col] != 0, col]*sigma,
label=col, c=next(dx_colors))
axes[1,1].legend(title=leg_title + f' {T1}K')
## plot a Laplacian & a Gaussian as benchmarks
sigmas = np.linspace(-10, 10, 101)
gauss = np.exp(-sigmas**2/2) / sum(np.exp(-sigmas**2/2)) * len(sigmas)/(max(sigmas)-min(sigmas))
axes[1,1].plot(sigmas, gauss, c='grey', ls=':')
laplace = np.exp(-abs(sigmas)*np.sqrt(2)) / sum(np.exp(-abs(sigmas)*np.sqrt(2))) * len(sigmas)/(max(sigmas)-min(sigmas))
axes[1,1].plot(sigmas, laplace, c='k', ls=':')
# ===== (e) Gs fitting: hardcoded time bounds =====
if T1 in T1s_gs :
## try loading a pre-computed Gself file
gs = hu.load_gs(plane.folder+f'/{mm}-*-gs-{T1}K*ps.csv', 'fitting')
if gs is None: continue
for h, (rs, ls) in enumerate(zip(rs_list, ['--', '-', '-.', ':'])) :
s = gs.loc[min(rs):max(rs),0.1:5e4].sum().reset_index()
s.columns = ['time','gs']
s.set_index('time', inplace=True)
# s = s/s.max()
l = f'{label}, <{h+1} hop{"s" if h != 0 else ""}'
axes[2,0].plot(s, label = l, c=cc, ls=ls)
# ## fit and plot the fit
# try:
# s = s.loc[1:]
# popt, perr = expectation_multi_method(s, method, aggregated=True, verbose=True)
# if method == 'simple' : ax.plot(s.index.values, exp_decay(s.index.values, *popt), c='k', ls=':')
# elif method == 'stretch' : ax.plot(s.index.values, kww_decay_break(s.index.values, *popt), c='k', ls=':')
# print(f'fitting {mm} {cn} {T1} {min(rs):.1f}-{max(rs):.1f}AA : {popt[1]:.1f}±{perr[1]:.1f} ps, beta={1.00 if len(popt)<4 else popt[3]:.2f}, tstar={0 if len(popt)<4 else popt[4]:.2f}')
# except : pass
## inverse interpolating function to plot the 1/e time
int_fun = interp1d(s.gs.values, s.index.values)
try : axes[2,0].plot(int_fun(1/np.e), 1/np.e, marker='o', ls='', fillstyle='full',
mfc='yellow', mec='k', zorder=3, markersize=4)
except : print(f'for {mm} {cn} {T1}, not all radii decay to 1/e')
axes[2,0].legend(title=leg_title)
if guides: axes[2,0].plot([1e3,3e4],[1/np.e,1/np.e], c='grey', lw=0.4, ls='--')
# ===== (f) C_D(t) =====
if T1 not in cd_exclude :
start = dt.now()
svals = np.logspace(-5, 2, 4000) # if not short else np.logspace(-6,5,3000)
## Laplace transform of C_D(t)
cds = hu.fluctuation_kernel(a2, svals, dim=dim)
try: cdt = hu.stehfest_inverse(cds, a2.index.values[1:-1])
except :
print(f'could not append inverse transform for {mm} {cn} {T1}')
break
cdt = pd.DataFrame({'time':a2.index.values[1:-1],'cdt':cdt}).set_index('time')
if na_bdp_unsym : cdt = cdt.where(cdt < cd_break, cd_transform)
axes[2,1].plot(cdt.cdt.loc[0.2:a2.index.max()/3+1], label=label, c=cc)
## create the interpolator for plotting little stars based on Gs
## try loading a pre-computed Gself file
gs = hu.load_gs(plane.folder+f'/{mm}-*-gs-{T1}K*ps.csv', 'cdt', radii=rs_list)
int_fun = interp1d(cdt.index.values, cdt.cdt)
try: axes[2,1].scatter(gs, int_fun(gs), marker='o', facecolors='yellow', edgecolors='k', zorder=3, s=16)
except : print('something wrong with plotting Gs * for {mm} {cn} {T1}')
## plot short-time separately
cds_s = hu.fluctuation_kernel(a2s, np.logspace(0,4,1000), dim=dim)
cdt_s = hu.stehfest_inverse(cds_s, a2s.index.values[1:-1])
cdt_s = pd.DataFrame({'time':a2s.index.values[1:-1],'cdt':cdt_s}).set_index('time')
axes[2,1].plot(cdt_s.cdt.loc[0.0085 if (mm == 'Na' and 'unsym' in cn) else 0.005:0.2], ls='--', c=cc)
print(f'done {T1}K, time taken {(dt.now()-start).total_seconds():.2f}')
axes[2,1].plot([1e-3, 5e4], [0,0], c='grey', ls=':', lw=0.4)
axes[2,1].legend(title=leg_title, loc='upper left')
else :
print(f'skipping C_D(t) for {mm} {cn} {T1}')
## plot prettymaking
axes[0,0].set(xlim=[0.025,5e4], xscale='log', ylim=[1e-2,3e4], yscale='log',
xlabel=r'Time lag $t$, ps', ylabel=r'$\langle \overline{r^2(t)} \rangle$, $\AA^2$')
axes[0,1].set(xlim=[0.4,30], xscale='log', ylim=[0,1.05], xlabel=r'$\langle \overline{ r(t) }\rangle,~\AA$',
yticks=[0,0.2,0.4,0.6,0.8,1.], yticklabels=['0.0','0.2','0.4','0.6','0.8','1.0'],
xticks=[1,10], xticklabels=['1','10'],
ylabel=r'Exponent of $\langle \overline{ r^2(t) }\rangle$')
axes[1,0].set(xlim=[-28,28], ylim=[3e-5,None], yscale='log',
xlabel=r'$\Delta x$, $\AA$', ylabel=r'$P(\Delta x)$, $\AA^{-1}$')
axes[1,1].set(xlim=[-7,7], ylim=[1e-5,None], yscale='log',
xlabel=r'$(\Delta x)/\sigma_{\Delta x}$', ylabel=r'$P(\Delta x)$, $\sigma_{\Delta x}^{-1}$')
# axes[2,0].set(ylim=[0,13.5], xlim=[0.5,9e3], xscale='log',
# xlabel=r'Time lag $t$, ps', ylabel=r'Distance $r,~\AA$')
axes[2,0].set(xlim=[0.1,5e4], xscale='log', ylim=[0,1.04],
ylabel=r'$G_s~r^2$, a.u.', xlabel=r'Time lag $t$, ps')
axes[2,1].set(xlim=[5e-3,5e3], xscale='log',
xlabel=r'Time lag $t$, ps', ylabel=r'$C_D(t)$')
## create the broken axis
if na_bdp_unsym :
r1 = Rectangle((4.5e-3,cd_break),5.2e3,cd_break_top-cd_break, lw=0,
facecolor='w', clip_on=False, transform=axes[2,1].transData, zorder=3)
axes[2,1].add_patch(r1)
kwargs = dict(transform=axes[2,1].transData, color='k', clip_on=False, lw=0.75,zorder=4)
axes[2,1].plot(5e-3*np.array([10**-0.05,10**0.05]), [cd_break-0.05,cd_break+0.05],**kwargs)
axes[2,1].plot(5e-3*np.array([10**-0.05,10**0.05]), [cd_break_top-0.05,cd_break_top+0.05],**kwargs)
axes[2,1].plot(5e3*np.array([10**-0.05,10**0.05]), [cd_break-0.05,cd_break+0.05],**kwargs)
axes[2,1].plot(5e3*np.array([10**-0.05,10**0.05]), [cd_break_top-0.05,cd_break_top+0.05],**kwargs)
# axes[2,1].set(yticks=[0,2,4,cd_transform(10), cd_transform(15)], yticklabels=['0','2','4','10','15'])
axes[2,1].set(yticks=[0,2,4,cd_transform(6), cd_transform(8)], yticklabels=['0','2','4','6','8'])
axes[2,1].set(ylim=[-0.7,7.1])
fig.tight_layout(pad=0.5, h_pad=0.25)
# =============================================================================
# %% Figure 3 top row: spectra of conductivity
# =============================================================================
## Parameters:
start_1 = 0
start_step = 10 ## [ps] interval for sampling eMSD
durations = np.round(np.logspace(0.4,3.4),2) ## [ps] 2.5-2500 ps, 50 pts
enforce_indep = True ## make start_step >= duration, for all durations
# rs_list = [[0.01,1.7],[0.01, 4.6]] ## to plot two-hop relaxation as the Jonscher cutoff
rs_list = []
# ========== automatic things below this line ==========
## three-panel top row
fig, axes = plt.subplots(1,3, sharey=True, figsize=(12,4))
# fig, axes = plt.subplots(3,1, sharex=True, figsize=(4,9))
# ===== (a) Na-beta-doubleprime spectra vs T1 =====
## load three planes
planes_to_load = all_planes.query('metal == "Na" & config == "unsym_0" & T1 in [230,300,473]')
macro_planes_data = hu.load_macro_planes(planes_to_load).sort_values(by='T1')
colors = cycle([batlow_even(j) for j in np.linspace(0, 1, len(macro_planes_data))])
ax = axes[0]
## load and plot Na doubleprime conductivity spectra
for i, plane in macro_planes_data.iterrows():
ph = plane.phase; mm = plane.metal; cn = plane.config
st = plane.stoich; ex = plane.exclude
T1 = plane.T1; folder = plane.folder
N = len(plane.atoms.query('atom == @mm'))
cell = plane.cell
dcoms = list()
## load a pre-corrected CoM trajectory
cor = False
try:
cor_fname = glob.glob(plane.folder + f'/cm_{mm}-{st}-{ex}-{T1}K-{mm}-cor.csv')
if isinstance(cor_fname, list) and len(cor_fname) == 1 :
com = pd.read_csv(cor_fname[0]).set_index('time')
print(f'\nLoaded a corrected CoM trajectory for {mm} {cn} T1={T1}K')
cor = True
except :
com = None; continue
dtt = com.index[1]-com.index[0]
## average multiple starts
for duration in durations:
## enforce start_step >= duration
if enforce_indep and start_step < duration : start_step = duration
if com.index.max() <= duration*4 :
dcoms.append(np.nan)
continue
dr = com.loc[duration+com.index.min()::int(start_step/dtt)] - com.loc[:com.index.max()-duration:int(start_step/dtt)].values
# dr['dcom'] = (dr.x**2 + dr.y**2 + dr.z**2)/duration
dr['dcom'] = (dr.x**2 + dr.y**2 )/duration
all_dcom2 = dr.dcom.values * N / 4
dcoms.append(np.mean(all_dcom2))
## transform D_CoM to conductivity and plot it
sigmas = sigma_T(N,cell,np.array(dcoms)*unit_conv)/T1
this_marker = next(markers)
ax.plot(1e12/durations, sigmas, this_marker+next(lines), label=f'eMSD, {T1}K',
markersize=5, c=next(colors))
## plot the two-hop relaxation time
int_fun = interp1d(1e12/durations, sigmas, fill_value=1e-10, bounds_error=False)
gs = np.array(hu.load_gs(folder+f'/{mm}-*-gs-{T1}K*ps.csv', option='Funke', radii=rs_list))
ax.plot(1e12/gs, int_fun(1e12/gs), marker=this_marker, mfc='yellow', mec='k', zorder=3, ls='', fillstyle='full')
## plot literature values:
refs = {'Funke2007':52, 'Almond1984':32, 'Hoppe1991':51, 'Barker1976':44,
'Kamishima2014':30, 'Kamishima2015':31}
## Funke & Banhatti (2007) - 473K
lit6 = pd.read_csv('./production/bdp-Na/Na_unsym_Funke2007_473K_lit.csv', names=['logfreq','sigma'])
axes[0].plot(10**lit6.logfreq, (10**lit6.sigma)/473, marker='o', mec='k', ls='', zorder=0,
mfc='none', markersize=4, label=f'Ref. {refs["Funke2007"]}, 473K') ## Funke $\\it{et\ al.}$ (2007), ($Li_{Al}^{\prime\prime}$)
## Hoppe & Funke (1991) - 220K
lit2 = pd.read_csv('./production/bdp-Na/Na_unsym_Hoppe1991_220K_lit.csv', names=['logfreq','sigma'])
axes[0].plot(10**lit2.logfreq, lit2.sigma, marker='o', mec='k', ls='', zorder=0,
mfc='none', markersize=4, label=f'Ref. {refs["Hoppe1991"]}, 220K') ## Hoppe $\\it{et\ al.}$ (1991), ($Li_{Al}^{\prime\prime}$)
## Hoppe & Funke (1991) - 298K
lit5 = pd.read_csv('./production/bdp-Na/Na_unsym_Hoppe1991_298K_lit.csv', names=['logfreq','sigma'])
axes[0].plot(10**lit5.logfreq, lit5.sigma, marker='^', mec='k', ls='', zorder=0,
mfc='none', markersize=4, label=f'Ref. {refs["Hoppe1991"]}, 298K') ## Hoppe $\\it{et\ al.}$ (1991) ($Li_{Al}^{\prime\prime}$)
## Almond et al (1984) - 237K
lit1 = pd.read_csv('./production/bdp-Na/Na_unsym_Almond1984_237K_lit.csv', names=['logfreq','sigma'])
axes[0].plot(10**lit1.logfreq, lit1.sigma, marker='d', mec='k', ls='', zorder=0,
mfc='k', fillstyle='full', markersize=4, label=f'Ref. {refs["Almond1984"]}, 237K') ## Almond $\\it{et\ al.}$ (1984)
## Almond (1984) - 296K
lit3 = pd.read_csv('./production/bdp-Na/Na_unsym_Almond1984_296K_lit.csv', names=['logfreq','sigma'])
axes[0].plot(10**lit3.logfreq, lit3.sigma, marker='s', mec='k', ls='', zorder=0,
mfc='k', fillstyle='full', markersize=4, label=f'Ref. {refs["Almond1984"]}, 296K') ## Almond $\\it{et\ al.}$ (1984)
## make plot pretty
axes[0].set(xlim=[8e5,6e11], ylim=[6e-4,1.2], xscale='log', yscale='log',
xlabel=r'$\nu=1/t$, Hz', ylabel=r'$\sigma_{2D}(t;\Delta)$, S/cm')
axes[0].legend(title=r'Na $\beta^{\prime\prime}$', ncol=2, handletextpad=0.5,
handlelength=1.5, columnspacing=0.4, loc='upper left')
## add guidelines
axes[0].plot([1e6, 2e9], [6e-3, 6e-3*2000**0.15], c='grey', lw=0.4)
axes[0].plot([1e6, 2e8], [1e-3, 1e-3*200**0.15], c='grey', lw=0.4)
axes[0].plot([4e9, 4e11], [0.06*100**-0.7, 0.06], c='grey', lw=0.4)
# ===== (b) Na-beta spectra 300K =====
## load planes
planes_to_load = all_planes.query('metal == "Na" & config == "120_4" & T1 == 300')
macro_planes_data = hu.load_macro_planes(planes_to_load)
ax = axes[1]
## load and plot Na beta conductivity spectra
for i, plane in macro_planes_data.iterrows():
ph = plane.phase; mm = plane.metal; cn = plane.config
st = plane.stoich; ex = plane.exclude
T1 = plane.T1; folder = plane.folder
N = len(plane.atoms.query('atom == @mm'))
cell = plane.cell
dcoms = list()
## load a pre-corrected CoM trajectory
cor = False
try:
cor_fname = glob.glob(plane.folder + f'/cm_{mm}-{st}-{ex}-{T1}K-{mm}-cor.csv')
if isinstance(cor_fname, list) and len(cor_fname) == 1 :
com = pd.read_csv(cor_fname[0]).set_index('time')
print(f'\nLoaded a corrected CoM trajectory for {mm} {cn} T1={T1}K')
cor = True
except :
com = None; continue
dtt = com.index[1]-com.index[0]
## average multiple starts
for duration in durations:
## enforce start_step >= duration
if enforce_indep and start_step < duration : start_step = duration
if com.index.max() <= duration*4 :
dcoms.append(np.nan)
continue
dr = com.loc[duration+com.index.min()::int(start_step/dtt)] - com.loc[:com.index.max()-duration:int(start_step/dtt)].values
# dr['dcom'] = (dr.x**2 + dr.y**2 + dr.z**2)/duration
dr['dcom'] = (dr.x**2 + dr.y**2 )/duration
all_dcom2 = dr.dcom.values * N / 4
dcoms.append(np.mean(all_dcom2))
sigmas = sigma_T(N,cell,np.array(dcoms)*unit_conv)/T1
this_marker = next(markers)
ax.plot(1e12/durations, sigmas, this_marker+next(lines), label=f'eMSD, {T1}K',
markersize=5, c=batlow_even(0))
## plot the two-hop relaxation time
int_fun = interp1d(1e12/durations, sigmas, fill_value=1e-10, bounds_error=False)
gs = np.array(hu.load_gs(folder+f'/{mm}-*-gs-{T1}K*ps.csv', option='Funke', radii=rs_list))
ax.plot(1e12/gs, int_fun(1e12/gs), marker=this_marker, mfc='yellow', mec='k', zorder=3, ls='', fillstyle='full')
## plot literature values
## 2 literature datasets, Barker (1976) - 300K ## Barker $\\it{et\ al.}$ (1976)
lit1 = pd.read_csv('./production/beta-Na/Na_120_Barker1976_flux_lit.csv', names=['logfreq','sigma'])
axes[1].plot(10**lit1.logfreq, lit1.sigma, marker='D', mec='k', markersize=4, ls='', zorder=0,
mfc='none', label=f'Ref. {refs["Barker1976"]}, 300K, flux')
lit2 = pd.read_csv('./production/beta-Na/Na_120_Barker1976_melt_lit.csv', names=['logfreq','sigma'])
axes[1].plot(10**lit2.logfreq, lit2.sigma, marker='s',mec='k', markersize=4, ls='', zorder=0,
mfc='none', label=f'Ref. {refs["Barker1976"]}, 300K, melt')
## Kamishima (2015) - 300K # Kamishima $\\it{et\ al.}$ (2015)
axes[1].plot(1e7, 0.011692, marker='>', mec='k', markersize=4, ls='', zorder=0,
mfc='k', fillstyle='full', label=f'Ref. {refs["Kamishima2015"]}, 300K')
## make plot pretty
axes[1].set(xlim=[7e6,6e11], xscale='log', xlabel=r'$\nu=1/t$, Hz')
axes[1].legend(title=r'Na $\beta$', handletextpad=0.5, handlelength=1.5)
## add guidelines
axes[1].plot([1e7, 4e8], [6e-3, 6e-3*40**0.1], c='grey', lw=0.4)
axes[1].plot([4e9, 4e11], [0.05*100**-0.6, 0.05], c='grey', lw=0.4)
# ===== (c) Ag-beta spectra 300K =====
## load planes
planes_to_load = all_planes.query('metal == "Ag" & config == "120_4" & T1 == 300')
macro_planes_data = hu.load_macro_planes(planes_to_load)
ax = axes[2]
## load and plot Ag beta conductivity spectra
for i, plane in macro_planes_data.iterrows():
ph = plane.phase; mm = plane.metal; cn = plane.config
st = plane.stoich; ex = plane.exclude
T1 = plane.T1; folder = plane.folder
N = len(plane.atoms.query('atom == @mm'))
cell = plane.cell
dcoms = list()
## load a pre-corrected CoM trajectory
cor = False
try:
cor_fname = glob.glob(plane.folder + f'/cm_{mm}-{st}-{ex}-{T1}K-{mm}-cor.csv')
if isinstance(cor_fname, list) and len(cor_fname) == 1 :
com = pd.read_csv(cor_fname[0]).set_index('time')
print(f'\nLoaded a corrected CoM trajectory for {mm} {cn} T1={T1}K')
cor = True
except :
com = None; continue
dtt = com.index[1]-com.index[0]
## average multiple starts
for duration in durations:
## enforce start_step >= duration
if enforce_indep and start_step < duration : start_step = duration
if com.index.max() <= duration*4 :
dcoms.append(np.nan)
continue
dr = com.loc[duration+com.index.min()::int(start_step/dtt)] - com.loc[:com.index.max()-duration:int(start_step/dtt)].values
# dr['dcom'] = (dr.x**2 + dr.y**2 + dr.z**2)/duration
dr['dcom'] = (dr.x**2 + dr.y**2 )/duration
all_dcom2 = dr.dcom.values * N / 4
dcoms.append(np.mean(all_dcom2))
sigmas = sigma_T(N,cell,np.array(dcoms)*unit_conv)/T1
this_marker = next(markers)
ax.plot(1e12/durations, sigmas, this_marker+next(lines), label=f'eMSD, {T1}K', markersize=5,
c=batlow_even(0))
## plot the two-hop relaxation time
int_fun = interp1d(1e12/durations, sigmas, fill_value=1e-10, bounds_error=False)
gs = np.array(hu.load_gs(folder+f'/{mm}-*-gs-{T1}K*ps.csv', option='Funke', radii=rs_list))
ax.plot(1e12/gs, int_fun(1e12/gs), marker=this_marker, mfc='yellow', mec='k', zorder=3, ls='', fillstyle='full')
## plot literature values
## Barker (1976) melt - 300K # Barker $\\it{et\ al.}$ (1976) melt
lit21 = pd.read_csv('./production/beta-Ag/Ag_120_Barker1976_melt_lit.csv', names=['logfreq','sigma'])
axes[2].plot(10**lit21.logfreq, lit21.sigma, marker='s', mec='k', markersize=4, ls='',
mfc='none', label=f'Ref. {refs["Barker1976"]}, 300K, melt', zorder=0)
## 3 samples from Kamishima (2014) - near 300K ## Kamishima $\\it{et\ al.}$ (2014)
lit22 = pd.read_csv('./production/beta-Ag/Ag_120_Kamishima2014_296K_S1_lit.csv', names=['logfreq','sigma'])
axes[2].plot(10**lit22.logfreq, lit22.sigma, marker='o', mec='k', markersize=4, ls='',
mfc='k', fillstyle='full', label=f'Ref. {refs["Kamishima2014"]}, 296K, A', zorder=0)
lit23 = pd.read_csv('./production/beta-Ag/Ag_120_Kamishima2014_286K_S2_lit.csv', names=['logfreq','sigma'])
axes[2].plot(10**lit23.logfreq, lit23.sigma, marker='^', mec='k', markersize=4, ls='',
mfc='k', fillstyle='full', label=f'Ref. {refs["Kamishima2014"]}, 286K, B', zorder=0)
lit24 = pd.read_csv('./production/beta-Ag/Ag_120_Kamishima2014_299K_S3_lit.csv', names=['logfreq','sigma'])
axes[2].plot(10**lit24.logfreq, lit24.sigma, marker='v', mec='k', markersize=4, ls='',
mfc='k', fillstyle='full', label=f'Ref. {refs["Kamishima2014"]}, 299K, C', zorder=0)
## make plot pretty
axes[2].set(xlim=[5e5,6e11], xscale='log', xlabel=r'$\nu=1/t$, Hz')
axes[2].legend(title=r'Ag $\beta$', handletextpad=0.5, handlelength=1.5)
## add guidelines
axes[2].plot([4e9, 4e11], [0.05*100**-0.6, 0.05], c='grey', lw=0.4)
axes[2].plot([2e6, 2e8], [3e-3, 3e-3*100**0.1], c='grey', lw=0.4)
fig.tight_layout(pad=0.5, w_pad=0.25)
# =============================================================================
# %% Fig 3 bottom row: Arrhenius plots
# =============================================================================
# Parameters:
start_1 = 0 ## [ps] time at which to start sampling CoM MSD
start_step = 2500 ## [ps] interval for sampling CoM MSD
start_last = 97500 ## [ps] last time at which to sample CoM MSD
duration = 2500 ## [ps] how long each sampling is
refs_dict = {'Davies(1986)':38, 'Briant(1980)':34, 'Bates(1981)':35,
'Whittingham(1972)':55, 'Almond(1984)':32}
beta_refs_dict = {'Ag':53, 'K':55, 'Na':54}
# ========== automatic things below this line ==========
## array for multiple starts and its tuple description for lookup of pre-dones
starts = np.arange(start_1,start_last,start_step,dtype=float)
spec=(start_1,start_step,start_last,duration)
## pre-load and filter for the same computation conditions right away
sigmas_msd = pd.read_csv('./production/sigmas_msd.csv')
sigmas_msd.spec = sigmas_msd.spec.apply(eval)
sigmas_msd_spec = sigmas_msd.query('spec == @spec')
# figure
fig, axes = plt.subplots(1,3, sharey=True, figsize=(12,4))
# ===== (d) Na-doubleprime: normal + quenched =====
planes_to_load = all_planes.query('metal == "Na" & config in ["unsym_0", "symm_1"] & T1 in [230,300,473,600]')
macro_planes_data = hu.load_macro_planes(planes_to_load)
## structure for new computations
new_sigmas_msd = list()
for i, plane in macro_planes_data.iterrows():
all_dcom = list()
all_derr = list()
ph = plane.phase
mm = plane.metal
cn = plane.config
st = plane.stoich
ex = plane.exclude
T1 = plane.T1
com = plane.com
N = len(plane.atoms.query('atom == @mm'))
## check this configuration was already computed
pre_done = sigmas_msd_spec.query('metal == @mm & stoich == @st & exclude == @ex & T1 == @T1')
assert len(pre_done) > 0, 're-compute {mm} {cn} {T1}'
new_sigmas_msd.append(pre_done.to_dict('records')[0])
# print(f'pre-done {variable}={var}, {variable2}={var2} T1={T1}K, sigma*T = {pre_done.sigt.iloc[0]:.2e} S*K/cm')
## convert all fitted sigmas to dataframe
new_sigmas_msd = pd.DataFrame(new_sigmas_msd)
## plot normal Na beta-doubleprime
sigts = new_sigmas_msd.query('config == "unsym_0"')
axes[0].errorbar(x=1000./sigts.T1.values, y=sigts.sigt, zorder=2.5,
yerr=[sigts.sigt-sigts.sigt_20, sigts.sigt_80-sigts.sigt],
label=r'Na $\beta^{\prime\prime}$, eMSD', mfc=hu.metal_colors['Na'],
mec=hu.metal_colors['Na'], c=hu.metal_colors['Na'],
fillstyle='full', marker='o', ls='')
## plot literature values
lit_folder = macro_planes_data.lit_folder.unique()[0]
d_lit_files = sorted(glob.glob(lit_folder + f'/{mm}*sigma*lit.csv'), reverse=True)
for f, sym in zip(d_lit_files, ['o','s','D','v','^','<','>']) :
d_lit = np.loadtxt(f, delimiter = ',')
# print('loaded', f)
## find the author+year if they are in the filename
auth = [x[0] for x in [re.findall('[A-z]*\(19\d\d\)$', x) for x in re.split('/|-|_| ',f)] if len(x) > 0][0]
try: t_synth = [x[0] for x in [re.findall('1\d\d0$', x) for x in re.split('/|-|_| ',f)] if len(x) > 0][0]
except: t_synth = None
ref = refs_dict[auth]
label = f'Ref. {ref}'
if 'symm' in f :
label += ', quench'
continue ## skip this for simplifying the plot
elif t_synth is not None :
# label += f', {t_synth}C'
if int(t_synth) == 1700 : continue
## scatterplot, can be updated to include errorbars
axes[0].plot(d_lit[:,0], 10**d_lit[:,1], label=label, zorder=0,
mec='k', # if variable2 != 'metal' else hu.metal_colors[var],
mfc=(0,0,0,0), marker=sym, linestyle='', markersize=5)
axes[0].set(ylabel='$\sigma$T [$\Omega^{{-1}}$ cm$^{{-1}}$ K]', xlabel='1000/T, K$^{-1}$',
ylim=[1.5e-2,1.7e3], yscale='log', xlim=[1.3,4.45])
axes[0].legend(loc='lower left', title=r'Na $\beta^{\prime\prime}$', title_fontsize=10)
# ===== (e) K,Ag-doubleprime =====
planes_to_load = all_planes.query('metal in ["K", "Ag"] & config == "unsym_0" & T1 in [230,300,473,600]')
macro_planes_data = hu.load_macro_planes(planes_to_load)
## structure for new computations
new_sigmas_msd = list()
for i, plane in macro_planes_data.iterrows():
all_dcom = list()
all_derr = list()
ph = plane.phase
mm = plane.metal
cn = plane.config
st = plane.stoich
ex = plane.exclude
T1 = plane.T1
com = plane.com
N = len(plane.atoms.query('atom == @mm'))
## check this configuration was already computed
pre_done = sigmas_msd_spec.query('metal == @mm & stoich == @st & exclude == @ex & T1 == @T1')
assert len(pre_done) > 0, 're-compute {mm} {cn} {T1}'
new_sigmas_msd.append(pre_done.to_dict('records')[0])
# print(f'pre-done {variable}={var}, {variable2}={var2} T1={T1}K, sigma*T = {pre_done.sigt.iloc[0]:.2e} S*K/cm')
## convert all fitted sigmas to dataframe
new_sigmas_msd = pd.DataFrame(new_sigmas_msd)
## plot K beta-doubleprime
sigts = new_sigmas_msd.query('metal == "K"')
axes[1].errorbar(x=1000./sigts.T1.values, y=sigts.sigt, zorder=2.5,
yerr=[sigts.sigt-sigts.sigt_20, sigts.sigt_80-sigts.sigt],
label=r'K $\beta^{\prime\prime}$, eMSD', c=hu.metal_colors['K'],
mfc=hu.metal_colors['K'], mec=hu.metal_colors['K'],
fillstyle='full', marker='o', ls='')
## plot K beta-doubleprime
sigts = new_sigmas_msd.query('metal == "Ag"')
axes[1].errorbar(x=1000./sigts.T1.values, y=sigts.sigt, zorder=2.5,
yerr=[sigts.sigt-sigts.sigt_20, sigts.sigt_80-sigts.sigt],
label=r'Ag $\beta^{\prime\prime}$, eMSD', c=hu.metal_colors['Ag'],
mfc=hu.metal_colors['Ag'], mec=hu.metal_colors['Ag'],
fillstyle='full', marker='s', ls='')
## plot literature values
lit_folders = macro_planes_data.lit_folder.unique()
d_lit_files = sorted(glob.glob(lit_folder + '/*sigma*lit.csv'), reverse=True)
for lf in lit_folders:
mm = re.split('-|/| ',lf)[-1]
d_lit_files = sorted(glob.glob(lf + '/*sigma*lit.csv'), reverse=True)
for f, sym in zip(d_lit_files, ['o','s','D','v','^','<','>']) :
d_lit = np.loadtxt(f, delimiter = ',')
# print('loaded', f)
## find the author+year if they are in the filename
auth = [x[0] for x in [re.findall('[A-z]*\(19\d\d\)$', x) for x in re.split('/|-|_| ',f)] if len(x) > 0][0]
try: t_synth = [x[0] for x in [re.findall('1\d\d0$', x) for x in re.split('/|-|_| ',f)] if len(x) > 0][0]
except: t_synth = None
ref = refs_dict[auth]
label = mm + r' $\beta^{\prime\prime}$, ' + f'Ref. {ref}'
# if 'symm' in f : label += ', quench'
# elif t_synth is not None : label += f', {t_synth}C'
## scatterplot, can be updated to include errorbars
axes[1].plot(d_lit[:,0], 10**d_lit[:,1], label=label, zorder=0,
mec='k', # if variable2 != 'metal' else hu.metal_colors[var],
mfc=(0,0,0,0), marker=next(markers), linestyle='', markersize=5)
axes[1].set(xlabel='1000/T, K$^{-1}$', xlim=[1.3,4.45])
axes[1].legend(loc='lower left', title_fontsize=10)
# ===== (f) Ag,K,Na beta, 120_4 =====
planes_to_load = all_planes.query('config == "120_4" & T1 in [300,600,1000]')
planes_to_load = planes_to_load.query('not (T1 == 300 & metal == "K")')
macro_planes_data = hu.load_macro_planes(planes_to_load)
## structure for new computations
new_sigmas_msd = list()
for i, plane in macro_planes_data.iterrows():
all_dcom = list()
all_derr = list()
ph = plane.phase
mm = plane.metal
cn = plane.config
st = plane.stoich
ex = plane.exclude
T1 = plane.T1
com = plane.com
N = len(plane.atoms.query('atom == @mm'))
## check this configuration was already computed
pre_done = sigmas_msd_spec.query('metal == @mm & stoich == @st & exclude == @ex & T1 == @T1')
assert len(pre_done) > 0, 're-compute {mm} {cn} {T1}'
new_sigmas_msd.append(pre_done.to_dict('records')[0])
# print(f'pre-done {variable}={var}, {variable2}={var2} T1={T1}K, sigma*T = {pre_done.sigt.iloc[0]:.2e} S*K/cm')
## convert all fitted sigmas to dataframe
new_sigmas_msd = pd.DataFrame(new_sigmas_msd)
for i, mm in enumerate(new_sigmas_msd.metal.unique()) :
sigts = new_sigmas_msd.query('metal == @mm')
axes[2].errorbar(x=1000./sigts.T1.values, y=sigts.sigt, zorder=2.5,
yerr=[sigts.sigt-sigts.sigt_20, sigts.sigt_80-sigts.sigt],
label=mm + r', eMSD', c=hu.metal_colors[mm], mfc=hu.metal_colors[mm], # + f', $t=${duration/1000} ns'
fillstyle='full', marker=next(markers), ls='')
## plot literature values
lit_folder = macro_planes_data.query('metal == @mm & config == "120_4"').lit_folder.unique()[0]
f = sorted(glob.glob(lit_folder + '/*sigma*lit.csv'), reverse=True)[0]
d_lit = np.loadtxt(f, delimiter = ',')
# print('loaded', f)
ref = beta_refs_dict[mm]
label = f' Ref. {ref}, {mm}'
## scatterplot, can be updated to include errorbars
axes[2].plot(d_lit[:,0], 10**d_lit[:,1], label=label, zorder=0, mec='k', # if variable2 != 'metal' else hu.metal_colors[var],
mfc=(0,0,0,0), marker=next(markers), linestyle='', markersize=5)
# print('plotted', f)
axes[2].set(xlabel='1000/T, K$^{-1}$', xlim=[0.9,3.49])
axes[2].legend(loc='lower left', title=r'$\beta$-aluminas', ncol=2, title_fontsize=10)
## final figure pretty-making
fig.tight_layout(pad=0.5, w_pad=0.25)
# =============================================================================
# %% SI Figure NN : non-Gaussian & EB parameters for all relevant simulations
# ## (Block 7, option='a2')
# =============================================================================
option = 'a2' ## implemented here: 'a2', 'eb'
do_fft = False ## implemented for 'a2', 'burnett'; requires regularly spaced data
dimension = 2 ## stuff within the conduction plane is 2D; super-short is 3D
plot_gs = True ## if True, add 1/e times from self van Hove function decay
guides = True ## plot log-log guidelines for long-time regimes
rs_list = [[0.01,1.75],[0.01, 4.2]] ## default radii for van Hove decay points
rs_list = [[0.01,1.7],[0.01, 4.6]] ## default radii for van Hove decay points
## variable by which to plot stuff: variable is columns, variable2 is rows
variable = 'metal'
variable2 = 'config'
eb_lag = 20
# ========== automatic things below this line ==========
planes_to_load = all_planes.query('config in ["unsym_0", "symm_1", "120_4"] & T1 in [230,300,473,600,1000]')
planes_to_load = planes_to_load.query('T1 != 473 or phase != "beta"').sort_values(by='config',ascending=False)
macro_planes_data = hu.load_macro_planes(planes_to_load)
## values of the first (metal) and second (config) variables in the loaded data
var_values = sorted(macro_planes_data[variable].unique())
var2_values = sorted(macro_planes_data[variable2].unique(), reverse=True)
## adjust colormap
batlow_even = LinearSegmentedColormap.from_list('batlow_even', hu.batlow_lightness_scaled(0.4,0.6))
## figure to make an Arrhenius plot
fig, axes = plt.subplots(len(var2_values), len(var_values), sharex=True, sharey='row',
figsize=(3.5*len(var_values),4.8*len(var2_values)))
## loop over values of the variable(s)
for r, var2 in enumerate(var2_values) :
# ## make y axes shared by row
# for ax in axes[r,:]:
# axes[r,0]._shared_y_axes.join(ax,axes[r,0])
for c, var in enumerate(var_values):
## set the current axes
ax = axes[r,c]
## subset planes
subset = macro_planes_data.query(f'{variable} == @var & {variable2} == @var2').sort_values(by="T1", ascending=True)
guide_T1 = subset.T1.max()
## make a color map
colors = [batlow_even(j) for j in np.linspace(0, 1, len(subset))]
## iterate through all data planes
for i, (index, plane) in enumerate(subset.iterrows()):
if plane[variable] == var and plane[variable2] == var2:
mm = plane.metal; st = plane.stoich; cn = plane.config
ex = plane.exclude; T1 = plane.T1; folder = plane.folder
ph = plane.phase
## set a plotting label
# label = f'{mm}$_{{{int(st)/100:.2f}}}$ {T1}K'
label = f'{T1}K'
# label = str(var2) + units[variable2]
leg_title = f'{mm} {phases[ph]}' + (' quench' if 'symm' in cn else '')
## interpolation for van Hove
int_fun = None
## load the 2D file - leaving out the "split" files
if dimension == 2 :
a2_xys = glob.glob(folder+f'/{mm}*{ex}-a2xy*{T1}K*ps.csv')
## load the a2 file if exactly one exists, else complain
if a2_xys :
if len(a2_xys) > 1 : a2_xys = sorted(a2_xys, reverse=True,
key = lambda x : eval(re.split('-|_| ',x)[-1][:-6]))
## read the a2 file - options for csv file
a2 = pd.read_csv(a2_xys[0], sep=',').set_index('time')
else : print(f'could not load a 2D a2 file for plane {mm} {cn} {T1}K')
else : a2 = plane.a2
## recalculate a2 for the right number of dimensions
a2.a2 = dimension * a2.r4 / a2.r2 ** 2 / (dimension+2) - 1
## non-Gaussian parameter
if option == 'a2' and not do_fft :
ax.plot(a2.a2, label=label, c=colors[i])
## create the interpolator for plotting little stars based on Gs
if plot_gs : int_fun = interp1d(a2.index.values, a2.a2)
## Plot points from van Hove function
if plot_gs and int_fun is not None :
try :
gs = hu.load_gs(plane.folder+f'/{mm}-*-gs-{T1}K*ps.csv', option, radii=rs_list)
ax.plot(gs, int_fun(gs), marker='o', mfc='yellow', ls='', markersize=5,
mec='k', zorder=3, fillstyle='full')
except ValueError :
print(f'something wrong with Gs for {mm} {cn} {T1}, check fractional/real computation.')
ax.legend(title=leg_title, loc='lower left')
## plot log-log guidelines
if guides and T1 == guide_T1 :
# axes[0].plot([0.25, 0.25*10**0.333],[3e-2,3e-1],c='k', lw=0.4)
if mm == 'K' and '120' in cn :
ax.plot([30, 3000],[1.5,1.5*10**-0.8],c='k', lw=0.4)
elif mm == 'Na' and 'unsym' in cn :
ax.plot([5,100], [0.4,0.4*20**-0.8], c='k', lw=0.4)
ax.plot([60,3e3], [2,2*50**-0.4], c='k', lw=0.4)
elif mm == 'Na' and '120' in cn :
ax.plot([8,400], [1,50**-0.5], c='k', lw=0.4)
elif mm == 'K' and 'unsym' in cn :
ax.plot([10,100], [0.35,0.35*10**-0.9], c='k', lw=0.4)
elif mm == 'Na' and 'symm' in cn :
ax.plot([10,100], [0.2,0.2*10**-0.8], c='k', lw=0.4)
elif mm == 'K' and 'symm' in cn :
ax.plot([7,70], [0.35,0.35*10**-0.9], c='k', lw=0.4)
elif mm == 'Ag' and 'symm' in cn :
ax.plot([10,100], [0.35,0.35*10**-0.7], c='k', lw=0.4)
elif mm == 'Ag' and '120' in cn :
ax.plot([15,150], [0.6,0.6*10**-0.6], c='k', lw=0.4)
ax.plot([50,2500], [1,50**-0.5], c='k', lw=0.4)
ax.plot([500,5e3], [2,2*10**-0.4], c='k', lw=0.4)
elif mm == 'Ag' and 'unsym' in cn :
ax.plot([20, 200], [0.3, 0.3*10**-0.8], c='k', lw=0.4)
ax.plot([4e2, 4e3], [2, 2*10**-0.4], c='k', lw=0.4)
elif option == 'eb' :
## Load and plot EB
try:
eb_glob = glob.glob(plane.folder+f'/*eb*{T1}K*{int(eb_lag)}ps.csv')
eb = pd.read_csv(eb_glob[0]).set_index('time')
ax.plot(eb.eb, label=label, c=colors[i]) # + f', $t={eb_lag}$ ps'
except:
print(f'could not load the first EB file for {mm} {cn} {T1}K: ')
ax.legend(title=leg_title, loc='lower left')
## plot log-log guidelines
if guides and T1 == guide_T1 :
if mm == 'K' and '120' in cn:
ax.plot([1e4,8e4],[0.1, 0.1*8**-0.75], lw=0.4, c='k')
elif mm == 'Na' and 'unsym' in cn:
ax.plot([7e3, 7e4], [0.25,0.25*10**-0.3], c='k', lw=0.4)
ax.plot([1e3, 1e4], [0.015,0.015*10**-0.9], c='k', lw=0.4)
elif mm == 'Na' and '120' in cn:
ax.plot([7e3,7e4], [0.02,0.02*10**-1], c='k', lw=0.4)
ax.plot([12e3,72e3], [0.11,0.11*6**-0.6], c='k', lw=0.4)
elif mm == 'K' and 'unsym' in cn :
ax.plot([12e3,72e3], [0.015,0.015*6**-0.6], c='k', lw=0.4)
ax.plot([1e3,1e4], [0.016,0.016*10**-1], c='k', lw=0.4)
elif mm == 'Na' and 'symm' in cn :
ax.plot([12e3,72e3], [0.017,0.017*6**-0.6], c='k', lw=0.4)
ax.plot([1e3,1e4], [0.02,0.02*10**-1], c='k', lw=0.4)
elif mm == 'Ag' and '120' in cn :
ax.plot([2e4, 7e4], [0.04,0.04*3.5**-1], c='k', lw=0.4)
ax.plot([3e3, 14e3], [0.13, 0.13*(14/3)**-0.6], c='k', lw=0.4)
# axes[2].plot([3e3, 3e4], [0.3, 0.3*10**-0.3], c='k', lw=0.4)
elif mm == 'Ag' and 'unsym' in cn :
ax.plot([2e3, 2e4], [0.02, 0.02*10**-1], c='k', lw=0.4)
elif mm == 'Ag' and 'symm' in cn :
axes[1,0].plot([9e2,9e3], [0.045, 0.045*10**-0.9], c='k', lw=0.4)
elif mm == 'K' and 'symm' in cn :
ax.plot([9e2,9e3], [0.05, 0.005], c='k', lw=0.4)
else : pass
## make axes pretty
if option == 'a2' :
# axes[0,0].set(xlim=[0.05,5e4], xscale='log', ylim=[0.02,None], yscale='log',yticks=[0.1,1.0,10])
for ax in axes[:,0] :
ax.set(ylabel='Non-Gauss. Param.', yscale='log',ylim=[0.02,None])
ax.set(yticks=[0.1,1.0,10] if max(ax.get_ylim()) > 10 else [0.1,1.0],
yticklabels=['0.1','1.0','10'] if max(ax.get_ylim()) > 10 else ['0.1','1.0'])
for ax in axes[:,1:].flat : ax.set(yticks=[])
for ax in axes[-1,:] : ax.set(xlabel=r'Time lag $t$, ps', xlim=[0.05,5e4], xscale='log')
elif option == 'eb' :
for ax in axes[:,0] :
ax.set(xlim=[5*eb_lag, 9e4], xscale='log', ylim=[1e-3, 3], yscale='log',
ylabel=f'EB at $t=${eb_lag} ps',
yticks=[0.01, 0.1, 1], yticklabels=['.01', '0.1', '1.0']
)
for ax in axes[-1,:] : ax.set(xlabel='Simulation Time $\Delta$, ps')
else : pass
fig.tight_layout(pad=0.5, w_pad=0.1)
# =============================================================================
# %% Extended Data Figure NN : Distributions of r^2_CoM
# ## top row: rescale = False, bottom row: rescale = True
# =============================================================================
option = 'hist' ## 'spectra' for conductivity, 'hist' for distributions of DCOM,
## variable by which to plot stuff
variable = 'T1' ## << pick a variable
start_1 = 0 ## [ps] time at which to start sampling CoM MSD
start_step = 10 ## [ps] interval for sampling CoM MSD
durations = np.array([2.5,25,250,2500]) ## [ps] for histograms
rescale=True ## divide the DCOM distribution by its stdev, with hist
enforce_random = False ## flag to enforce start_step >= duration
# ========== automatic things below this line ==========
## load three planes
planes_to_load = all_planes.query('metal == "Na" & config == "unsym_0" & T1 in [230,300,473,600]')
macro_planes_data = hu.load_macro_planes(planes_to_load)
## values of the first variable in the loaded data
var_values = sorted(macro_planes_data[variable].unique())
## initialize a second variable
variable2 = None
## deduce a second variable
if len(macro_planes_data) > len(var_values) : ## a second variable is varied
for rv in [x for x in relevant_vars if x != 'z']:
if rv in macro_planes_data.columns and len(set(macro_planes_data[rv])) > 1 and rv != variable:
variable2 = rv
break
else: variable2 = 'config'
var2_values = sorted(macro_planes_data[variable2].unique())
## figure to plot the distributions
## do not share x-scale if each axes is at a different temperature ## sharex=(variable != 'T1'),
fig, axes = plt.subplots(1, len(var_values), sharey=True, sharex=rescale,
figsize=(3.2*len(var_values),3.75))
if len(var_values) < 2: axes = [axes]
## structure for new computations
new_sigmas_msd = list()
## loop over the values of the variables
for var, ax in zip(var_values, axes) :
for var2 in var2_values:
for i, plane in macro_planes_data.iterrows():
if plane[variable] == var and plane[variable2] == var2:
ph = plane.phase; mm = plane.metal; cn = plane.config
st = plane.stoich; ex = plane.exclude
T1 = plane.T1
# com = plane.com
N = len(plane.atoms.query('atom == @mm'))
cell = plane.cell
dcoms = list()
## load a pre-corrected result
cor = False
try:
cor_fname = glob.glob(plane.folder + f'/cm_{mm}-{st}-{ex}-{T1}K-{mm}-cor.csv')
if isinstance(cor_fname, list) and len(cor_fname) == 1 :
com = pd.read_csv(cor_fname[0]).set_index('time')
print(f'\nLoaded an Al-corrected CoM trajectory for {variable}={var}, {variable2}={var2} T1={T1}K')
cor = True
except : continue
dtt = com.index[1]-com.index[0]
## average multiple starts
for c, duration in enumerate(durations):
## enforce start_step >= duration
if enforce_random and start_step < duration : start_step = duration
if com.index.max() <= duration*4 :
dcoms.append(np.nan)
# print(f'Clipping long duration: {variable}={var}, {variable2}={var2} T1={T1}K')
continue
dr = com.loc[duration+com.index.min()::int(start_step/dtt)] - com.loc[:com.index.max()-duration:int(start_step/dtt)].values
# dr['dcom'] = (dr.x**2 + dr.y**2 + dr.z**2)/duration
dr['dcom'] = (dr.x**2 + dr.y**2 )/duration
all_dcom2 = dr.dcom.values / 4
dcoms.append(np.mean(all_dcom2))
## divide by st.dev. / plot D_CoM rescaled by its st.dev.
# print(f'{mm} {cn} {T1}K : mean {np.mean(all_dcom2):.3g} A2/ps, st.d. {np.std(all_dcom2):.3g} A2/ps (lag={duration}, step={start_step}), simple method')
if rescale : all_dcom2 = np.array(all_dcom2) / np.std(all_dcom2)
## plot stuff
if option == 'hist' :
## plot a histogram of D_com
dcom2_bins = np.linspace(min(all_dcom2)*(0.99 if min(all_dcom2) < 0 else 1.01), max(all_dcom2)*1.01,50)
hist, bin_edges = np.histogram(all_dcom2, bins=dcom2_bins, density=True)
bin_ctrs = (bin_edges[:-1] + bin_edges[1:])*0.5
if duration < 10 :
l = f'{duration:.1f} ps'
elif duration > 1e3 :
l = f'{duration/1e3:.1f} ns'
else :
l = f'{duration:.0f} ps'
ax.plot(bin_ctrs, hist, label=f'$t=${l}', linestyle='-', marker=None)
del com
ax.legend(title=f'{mm} {phases[ph]}, {T1}K')
## axes decorations
for ax in axes:
if rescale :
ax.plot([1,2,3],[0.1,0.1/np.e, 0.1/np.e**2], c='k', lw=0.4)
ax.set(xlim=[0,9.5], xlabel=r'$D_{CoM}(t;\Delta,\delta)/\sigma_{D_{CoM}}$')
axes[0].set(ylabel=r'$P(D_{CoM})$, $\sigma^{-1}_{D_{CoM}}$', yscale='log')
else :
ax.set(xscale='log', yscale='log', ylim=[0.5,None])
axes[0].set(ylabel=f'$P(D_{{CoM}}(t;\Delta,\delta={start_step}ps)$, ps $\AA^{{-2}}$')
ax.set(xlabel=r'$D_{CoM}(t;\Delta,\delta)$, $\AA^2$/ps')
## recolor lines
non_guide_lines = [x for x in ax.lines if x.get_label()[0] != '_']
colors = [batlow_even(j) for j in np.linspace(0, 1, len(non_guide_lines))]
for i, l in enumerate(non_guide_lines) : l.set(color=colors[i])
## remake legend with same title - but possibly new colors
ax.legend(title=ax.get_legend().get_title().get_text(), loc='upper right' if rescale else 'lower left')
fig.tight_layout(pad=0.5, w_pad=0.25)
# =============================================================================
# %% Figure SN : Crowding by Oi
# =============================================================================
# ===== load data =====
planes_to_load = all_planes.query('metal == "Na" & num_planes > 1 & phase == "beta" & T1 in [300,600] & config in ["120_1", "120_4"]')
planes_to_load = all_planes.query('metal == "Na" & num_planes > 1 & phase == "beta" & T1 == 300 & config in ["120_M1", "120_M4"]')
# ===== parameters =====
option = 'spectra' ## 'spectra' for conductivity, 'hist' for distributions of DCOM,
start_1 = 0 ## [ps] time at which to start sampling CoM MSD
start_step = 10 ## [ps] interval for sampling CoM MSD
durations = np.round(np.logspace(0.4,3.4),2) ## [ps] how long each sampling is. Time "Delta" from Barkai
enforce_random = False ## flag to enforce start_step >= duration
# ========== automatic things below this line ==========
variable = 'T1' ## hard-coded here for paper figures
variable2 = 'config'
macro_planes_data = hu.load_macro_planes(planes_to_load).sort_values(by=[variable2,variable])
## values of the first variable in the loaded data
var_values = sorted(macro_planes_data[variable].unique())
var2_values = sorted(macro_planes_data[variable2].unique())
colors = cycle([batlow_even(i) for i in np.linspace(0, 1, len(var2_values))])
# ===== conductivity =====
## figure to plot the distributions
## do not share x-scale if each axes is at a different temperature ## sharex=(variable != 'T1'),
fig, ax = plt.subplots(figsize=(5,3.75))
## loop over the values of the variables
for var in var_values :
for var2 in var2_values:
plane = macro_planes_data.query(f'{variable} == @var & {variable2} == @var2').iloc[0]
ph = plane.phase; mm = plane.metal; cn = plane.config
st = plane.stoich; ex = plane.exclude; T1 = plane.T1
N = len(plane.atoms.query('atom == @mm'))
cell = plane.cell
defect_type = [r'$O_i^{\prime\prime}$', r'$Mg_{Al}^\prime$'][('M' in ex) or (ph == 'bdp')]
dcoms = list()
## load a pre-corrected result
cor = False
try:
cor_fname = glob.glob(plane.folder + f'/cm_{mm}-{st}-{ex}-{T1}K-{mm}-cor.csv')
if isinstance(cor_fname, list) and len(cor_fname) == 1 :
com = pd.read_csv(cor_fname[0]).set_index('time')
print(f'\nLoaded an Al-corrected CoM trajectory for {variable}={var}, {variable2}={var2} T1={T1}K')
cor = True
except : com = None
## check that the trajectory is loaded, and is long enough
# if (com is None) or (com.index.max() <= duration) :
if com is None :
print(f'CoM trajectory too short. Found {com.index.max()} ps, need {duration} ps.\n')
continue
dtt = com.index[1]-com.index[0]
## average multiple starts
for duration in durations:
## enforce start_step >= duration
if enforce_random and start_step < duration : start_step = duration
if com.index.max() <= duration*4 :
dcoms.append(np.nan)
print(f'Clipping long duration: {variable}={var}, {variable2}={var2}')
continue
dr = com.loc[duration+com.index.min()::int(start_step/dtt)] - com.loc[:com.index.max()-duration:int(start_step/dtt)].values
# dr['dcom'] = (dr.x**2 + dr.y**2 + dr.z**2)/duration
dr['dcom'] = (dr.x**2 + dr.y**2 )/duration
all_dcom2 = dr.dcom.values * N / 4
dcoms.append(np.mean(all_dcom2))
sigts = sigma_T(N,cell,np.array(dcoms)*unit_conv)
l = f'{mm}$_{{{eval(st)/100:.2f}}}~\sigma$ from $r_{{CM}}^2 $' if len(macro_planes_data) == 1 and ph == 'beta' else f'{var2}{units[variable2]}'
l = f'{mm}$_{{{st}}}~\sigma$ from $r_{{CM}}^2 $' if len(macro_planes_data) == 1 and ph != 'beta' else l
l = f'{T1}K, {defect_type} {ex.replace("M","")}+'
ax.plot(1e12/durations, sigts/T1, next(markers)+next(lines),
label=l, mfc='none', c=next(colors))
del com
ax.legend(loc='lower right', title=f'{mm} {phases[ph]}-alumina')
ax.set(yscale='log', xscale='log', xlabel=r'$\nu=1/t$, Hz')
ax.set(ylabel=r'$\sigma_{xy}(t;\Delta)$, S/cm')
ax.set(xlim=[1e8,6e11])
fig.tight_layout(pad=0.5, h_pad=0.25)
# ===== r2 and C_D =====
dim = 2
## new figure with 2 panels
fig3, axes = plt.subplots(3,1, sharex=True, figsize=(4.5, 9.9))
## loop over the values of the variables
for var in var_values :
for var2 in var2_values:
plane = macro_planes_data.query(f'{variable} == @var & {variable2} == @var2').iloc[0]
ph = plane.phase; mm = plane.metal; cn = plane.config
st = plane.stoich; ex = plane.exclude; T1 = plane.T1
N = len(plane.atoms.query('atom == @mm'))
cell = plane.cell
defect_type = [r'$O_i^{\prime\prime}$', r'$Mg_{Al}^\prime$'][('M' in ex) or (ph == 'bdp')]
label = f'{T1}K, {defect_type} {ex.replace("M","")}+'
leg_title = f'{mm} {phases[ph]}-alumina'
this_color = next(colors); this_line = next(lines)
## load the 2D a2 file - leaving out the "split" files
a2_xys = glob.glob(plane.folder+f'/{mm}*{ex}-a2xy*{T1}K*ps.csv')
## load the a2 file if exactly one exists, else complain
if a2_xys :
if len(a2_xys) > 1 : a2_xys = sorted(a2_xys, reverse=True,
key = lambda x : eval(re.split('-|_| ',x)[-1][:-6]))
## read the a2 file - options for csv file
a2 = pd.read_csv(a2_xys[0], sep=',').set_index('time')
else : print(f'could not load a 2D a2 file for plane {mm} {cn} {T1}K')
## recalculate a2 for the right number of dimensions
a2.a2 = dim * a2.r4 / a2.r2 ** 2 / (dim+2) - 1
## load a short-time a2 file if using one
try :
a2s = pd.read_csv(plane.folder + f'/{mm}-{st}-{ex}-a2{"xy" if dim == 2 else ""}-{T1}K-10ps.csv').set_index('time')
a2s.a2 = dim * a2s.r4 / a2s.r2 ** 2 / (dim+2) - 1
except :
print(f'could not load a short a2 file for plane {mm} {cn} {T1}K')
a2s = None
## load Gs
## Plot points from van Hove function
try : gs = hu.load_gs(plane.folder+f'/{mm}-*-gs-{T1}K*ps.csv', 'cdt', radii=[[0.01,1.7],[0.01,4.6]])
except ValueError : print(f'something wrong with Gs for {mm} {cn} {T1}, check fractional/real computation.')
# === r2 ===
axes[0].plot(a2.r2.iloc[1:], label=label, c=this_color, ls=this_line)
axes[0].plot(a2s.r2.iloc[1:].loc[:0.02], c=this_color, ls=this_line)
axes[0].legend(title=leg_title, loc='upper left')
# === NGP ===
axes[1].plot(a2.a2, label=label, c=this_color, ls=this_line)
axes[1].plot(a2s.a2.iloc[1:].loc[:0.02], c=this_color, ls=this_line)
## create the interpolator for plotting little stars based on Gs
int_fun = interp1d(a2.index.values, a2.a2)
## Plot points from van Hove function
axes[1].plot(gs, int_fun(gs), marker='o', mfc='yellow', ls='', markersize=4,
mec='k', zorder=3, fillstyle='full')
axes[1].legend(title=leg_title, loc='lower left')
# === C_D ===
start = dt.now()
svals = np.logspace(-5, 2, 4000) # if not short else np.logspace(-6,5,3000)
## Laplace transform of C_D(t)
cds = hu.fluctuation_kernel(a2, svals, dim=dim)
try: cdt = hu.stehfest_inverse(cds, a2.index.values[1:-1])
except :
print(f'could not append inverse transform for {mm} {cn} {T1}')
break
cdt = | pd.DataFrame({'time':a2.index.values[1:-1],'cdt':cdt}) | pandas.DataFrame |
import numpy as np
import pandas as pd
from powersimdata.input.input_data import InputData
from powersimdata.tests.mock_scenario import MockScenario
from postreise.analyze.transmission import congestion
mock_plant = {
"plant_id": ["A", "B", "C", "D"],
"bus_id": [1, 1, 2, 3],
}
mock_bus = {
"bus_id": [1, 2, 3, 4],
"Pd": [5, 6, 30, 1],
"zone_id": [1, 1, 1, 2],
}
grid_attrs = {"plant": mock_plant, "bus": mock_bus}
def _check_return(expected_return, surplus):
assert isinstance(surplus, pd.Series)
msg = "Time series indices don't match"
np.testing.assert_array_equal(
surplus.index.to_numpy(), expected_return.index.to_numpy(), msg
)
msg = "Values don't match expected"
np.testing.assert_array_equal(surplus.to_numpy(), expected_return.to_numpy(), msg)
def test_calculate_congestion_surplus_single_time(monkeypatch):
"""Congested case from Kirschen & Strbac Section 5.3.2.4"""
def mock_get_data(*args, **kwargs):
return demand
# Override default InputData.get_data method to avoid profile csv lookup
monkeypatch.setattr(InputData, "get_data", mock_get_data)
demand = | pd.DataFrame({"UTC": ["t1"], 1: [410], 2: [0]}) | pandas.DataFrame |
import numpy as np
import pandas as pd
class RegressionTree:
def __init__(self, col_names):
self.train_data, self.test_data = RegressionTree.get_data(col_names)
full_data = | pd.concat([self.train_data, self.test_data]) | pandas.concat |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from IMLearn.metrics import loss_functions as loss
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def proccess(matrix: pd.array):
exclude_feutrues=["sqft_lot15","sqft_lot","lat", "long","date","yr_renovated"] ##Features that are not relevant for the regression
matrix = matrix.loc[:, ~matrix.columns.isin(exclude_feutrues)]
matrix = matrix.apply(pd.to_numeric, errors="coerce")
matrix = matrix[matrix['yr_built'].values > 1000]
matrix = matrix[matrix['price'].values > 100]
matrix = matrix.dropna()
y=matrix['price']
X=matrix.loc[:,~matrix.columns.isin(["price"])]
return(X,y)
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
table = pd.read_csv(filename, index_col=0)
return proccess(table)
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
Y, X= y.to_numpy(), X.to_numpy()
y_sigma= np.sqrt(np.var(y))
corr_lst = []
for i in range(X.shape[1]):
cov = np.cov(X[:, i],y)
xi_sigma = np.sqrt(np.array(np.var(X[:,i])))
corr_i=cov/(xi_sigma*y_sigma)
corr_lst.append(corr_i[0,1])
sqft_living=X[:,2]
go.Figure([go.Scatter(x=sqft_living, y=Y, mode='markers', name=r'$\price$'),],
layout=go.Layout(title=r"$\text{Price as function of sqft living}$",
xaxis_title="$\\text{ sqft}$",
yaxis_title="r$\\text{ price}$")).show()
yr_built=X[:,10]
go.Figure([go.Scatter(x=yr_built, y=Y, mode='markers', name=r'$\yr_built$'),],
layout=go.Layout(title=r"$\text{Price as function of Year of built}$",
xaxis_title="$\\text{ yr_built}$",
yaxis_title="r$\\text{ price}$")).show()
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
X, y = load_data('datasets/house_prices.csv')
# # Question 2 - Feature evaluation with respect to response
feature_evaluation(X, y)
# Question 3 - Split samples into training- and testing sets.
X = | pd.get_dummies(X, columns=["zipcode"]) | pandas.get_dummies |
"""
事前準備に
$ pip install pandas
$ pip install openpyxl
$ pip install xlrd
が必要
リファレンス
https://pandas.pydata.org/pandas-docs/stable/reference/index.html
"""
import pandas as pd
import openpyxl
excel_in_path1 = './data/excel_in_header_2sheet.xlsx'
print("********何も指定せず読み込み********")
# 何も指定しない場合は最初のシートになる
df = pd.read_excel(excel_in_path1)
print(df)
print(df.index) # index(行の項目名)
print(df.columns) # header (列の項目名)
print(df.values) # 値
print(df.dtypes) # 判定指定した型
print(type(df.at[0,'Note'])) # "OK" 文字列の場合<str>
print(type(df.at[3,'Note'])) # Nan 空白(セルの内容がない場合)の場合<float>扱い
print(type(df.at[4,'Note'])) # 12 数値の場合<int64>
print("********シートを指定して読み込み********")
df = pd.read_excel(excel_in_path1, sheet_name="Member")
print(df)
df = pd.read_excel(excel_in_path1, sheet_name="History")
print(df)
print("********dtypeを指定して読み込み********")
df = pd.read_excel(excel_in_path1, dtype={"Note": 'str'})
print(df)
print(df.dtypes) # 判定指定した型
print(type(df.at[0,'Note'])) # <str> 元々文字列なので変わらず
print(type(df.at[3,'Note'])) # <float> 空白の場合strを指定してもNaNなのでfloat扱いのようだ
print(type(df.at[4,'Note'])) # <str> 指定がない場合はint64扱いだったのがstrになった
print("NaN判定")
print(pd.isnull(df.at[0,'Note'])) # NaNではないのでFalse
print(pd.isnull(df.at[3,'Note'])) # NaNなのでTrue
#df.at[3,'Note'] = ""
#print(df.at[3,'Note']) #
#print(type(df.at[3,'Note'])) #
for index, note in enumerate(df['Note']):
if pd.isnull(note):
df.at[index, 'Note'] = "" # 空白に変更
print(type(df.at[index,'Note']))
print(df['Note'])
print("********Dateの扱い********")
# excelのdateは1900/1/1からのシリアル日時なのでPythonの日付クラスとは扱いが異なる・
import datetime
#参考
#https://docs.python.org/ja/3.8/library/datetime.html
df = pd.read_excel(excel_in_path1, sheet_name="Member")
birthdays = df['Birthday']
def convert_date_from_excel(excel_sirial):
# excelのシリアル値は1900/1/1を1としている、そして何故か1900/2/29が存在することになっているので-2する必要がある。
# そのため1900/2/29までのシリアル値の場合一日ずれるがその年代を使う確率は低いので運用上は問題ないと思う。
return datetime.date(1900,1,1) + datetime.timedelta(days=(excel_sirial -2))
#from_excel_date = lambda sirial : datetime.date(1899,12,31) + datetime.timedelta(days=sirial)
print(birthdays[0])
print(type(birthdays[0]))
pydate = convert_date_from_excel(int(birthdays[0]))
print(pydate)
print(type(pydate))
"""
# この書き方でも実際にDataFrameが書き換わるが、forで回しているリストを書き換えている為Warningが発生する
# SettingWithCopyWarning:
# A value is trying to be set on a copy of a slice from a DataFrame
for index, birthday in enumerate(birthdays):
pydate = convert_date_from_excel(birthday)
birthdays[index] = pydate
"""
pyBirthdays = []
for birthday in birthdays:
pydate = convert_date_from_excel(birthday)
pyBirthdays.append(pydate)
print(pyBirthdays)
birthdays.update(pd.Series(pyBirthdays))
# dateframe自体が入れ替わる
print(df)
print("********新しいexcelファイルを作成(新規作成)********")
df = pd.read_excel(excel_in_path1, sheet_name="Member")
print(df)
excel_out_default = './data/excel_out_defaut.xlsx'
# シート名はSheet1. headerとindexがついた状態で状態で出力される(Dateもそのままシリアル値で)
df.to_excel(excel_out_default)
excel_out_no_index = './data/excel_out_no_index.xlsx'
df.to_excel(excel_out_no_index, index=False)
excel_out_no_header = './data/excel_out_no_header.xlsx'
df.to_excel(excel_out_no_header, header=False)
excel_out_sheet_name = './data/excel_out_sheet_name.xlsx'
df.to_excel(excel_out_sheet_name, sheet_name='test_sheet')
excel_out_change_start_cell = './data/excel_out_change_start_cell.xlsx'
df.to_excel(excel_out_change_start_cell, startrow=3, startcol= 2)
print("********複数シートの書き込み********")
df1 = pd.read_excel(excel_in_path1, sheet_name="Member")
df2 = pd.read_excel(excel_in_path1, sheet_name="History")
excel_out_two_sheets = './data/excel_out_two_sheets.xlsx'
with pd.ExcelWriter(excel_out_two_sheets) as ew:
df1.to_excel(ew, sheet_name='Member_copy', index=False)
df2.to_excel(ew, sheet_name='History_copy', index=False)
print("********dateのフォーマット********")
df = | pd.read_excel(excel_in_path1, sheet_name="Member") | pandas.read_excel |
import re
import pandas as pd
import numpy as np
from datasets.constants import signal_types
from datasets.sources.source_base import SourceBase
import logging
logger = logging.getLogger(__name__)
class EverionSource(SourceBase):
FILES = {
'signals': r'^CsvData_signals_EV-[A-Z0-9-]{14}\.csv$',
'sensors': r'^CsvData_sensor_data_EV-[A-Z0-9-]{14}\.csv$',
'features': r'^CsvData_features_EV-[A-Z0-9-]{14}\.csv$',
# 'aggregates': r'^CsvData_aggregates_EV-[A-Z0-9-]{14}\.csv$',
# 'analytics': r'^CsvData_analytics_events_EV-[A-Z0-9-]{14}\.csv$',
# 'events': r'^CsvData_everion_events_EV-[A-Z0-9-]{14}\.csv$',
}
META = {
'inter_pulse_interval': {
'type': signal_types.RR_INTERVAL,
'unit': 'Milliseconds'
},
'heart_rate': {
'unit': 'BPM'
},
'heart_rate_variability': {
'unit': 'Milliseconds'
},
'gsr_electrode': {
'unit': 'Microsiemens'
},
'ctemp': {
'unit': 'Celsius'
},
'temperature_object': {
'unit': 'Celsius'
},
'temperature_barometer': {
'unit': 'Celsius'
},
'temperature_local': {
'unit': 'Celsius'
},
'barometer_pressure': {
'unit': 'Millibar'
},
'respiration_rate': {
'unit': 'BPM'
},
'oxygen_saturation': {
'unit': 'Percent'
},
}
SIGNAL_TAGS = {
6: ['heart_rate'],
7: ['oxygen_saturation'],
#8: ['perfusion_index'],
#9: ['motion_activity'],
#10: ['activity_classification'],
11: ['heart_rate_variability', 'heart_rate_variability_quality'],
12: ['respiration_rate'],
#13: ['energy'],
15: ['ctemp'],
19: ['temperature_local'],
20: ['barometer_pressure'],
21: ['gsr_electrode'],
#22: ['health_score'],
#23: ['relax_stress_intensity_score'],
#24: ['sleep_quality_index_score'],
#25: ['training_effect_score'],
#26: ['activity_score'],
#66: ['richness_score'],
#68: ['heart_rate_quality'],
#69: ['oxygen_saturation_quality'],
#70: ['blood_pulse_wave', 'blood_pulse_wave_quality'],
#71: ['number_of_steps'],
#72: ['activity_classification_quality'],
#73: ['energy_quality'],
#74: ['heart_rate_variability_quality'],
#75: ['respiration_rate_quality'],
#76: ['ctemp_quality'],
118: ['temperature_object'],
119: ['temperature_barometer'],
#133: ['perfusion_index_quality'],
#134: ['blood_pulse_wave_quality']
}
SENSOR_TAGS = {
80: ['led1_data'],
81: ['led2_data'],
82: ['led3_data'],
83: ['led4_data'],
84: ['accx_data'],
85: ['accy_data'],
86: ['accz_data'],
#88: ['led2_current'],
#89: ['led3_current'],
#90: ['led4_current'],
#91: ['current_offset'],
#92: ['compressed_data']
}
FEATURE_TAGS = {
14: ['inter_pulse_interval', 'inter_pulse_interval_deviation'],
#17: ['pis'],
#18: ['pid'],
#77: ['inter_pulse_deviation'],
#78: ['pis_quality'],
#79: ['pid_quality']
}
@classmethod
def name(cls):
return "Biovotion Everion"
@classmethod
def fileOptions(cls):
return [
{
'label': 'Signals Data',
'pattern': '^CsvData_signals_EV-[A-Z0-9-]{14}\.csv$',
'required': True,
'multiple': False,
'timestamp': False
},
{
'label': 'Sensor Data',
'pattern': '^CsvData_sensor_data_EV-[A-Z0-9-]{14}\.csv$',
'required': False,
'multiple': False,
'timestamp': False
},
{
'label': 'Features Data',
'pattern': '^CsvData_features_EV-[A-Z0-9-]{14}\.csv$',
'required': False,
'multiple': False,
'timestamp': False
},
]
@staticmethod
def extend_values(df, dtype='float64'):
values_extended = df['values'].str.extract(r'(?P<value>[\d.]+);?(?P<value2>[\d.]+)?') \
.astype({ 'value': dtype, 'value2': dtype }, copy=False)
df_extended = pd.concat([df, values_extended], axis=1)
df_extended.drop(columns='values', inplace=True)
return df_extended
@staticmethod
def get_dataframe_iterator(path, cols=['count', 'tag', 'time', 'values']):
parse_dates = ['time'] if 'time' in cols else None
return pd.read_csv(
path,
usecols=cols,
dtype={
'count': 'uint32',
'streamType': 'int8',
'tag': 'int8',
'values': 'object'
},
parse_dates=parse_dates,
date_parser=lambda x: pd.to_datetime(x, unit='s', utc=True),
chunksize=100000
)
@staticmethod
def split_data(df, predicate):
df = df.copy()
df_split = []
split_at = df[predicate(df)]['count'].unique()
for index, count in enumerate(split_at):
selected = df['count'] <= count
if index > 0:
selected = selected & (df['count'] > split_at[index - 1])
df_split.append(df[selected])
# If it was splitted append last segment, else whole dataframe
if split_at.size == 0:
df_split.append(df)
else:
df_split.append(df[df['count'] > split_at[-1]])
assert np.sum([len(part) for part in df_split]) == len(df)
return [part for part in df_split if not part.empty]
@classmethod
def create_time_lookup_for_ibi(cls, path, max_deviation=15, threshold=600):
df = pd.DataFrame()
df_iterator = cls.get_dataframe_iterator(path, ['tag', 'count', 'time', 'values'])
# append data from csv in chunks and drop duplicates
for chunk in df_iterator:
chunk.drop_duplicates(subset=['count', 'tag'], inplace=True)
chunk = chunk[chunk['tag'] == 14]
chunk = cls.extend_values(chunk)
chunk = chunk[chunk['value2'] <= max_deviation]
chunk.drop('value2', axis='columns', inplace=True)
chunk['value'] = chunk['value'].astype('uint16')
df = pd.concat([df, chunk], sort=False)
df.drop_duplicates(subset=['count', 'tag'], inplace=True)
df.sort_values(['tag', 'count'], inplace=True)
df.reset_index(drop=True, inplace=True)
# split dataframes in consecutive parts
df_split = cls.split_data(
df,
lambda x: x['time'].shift(-1, fill_value=x['time'].max()) - x['time'] - pd.to_timedelta(x['value'].shift(-1, fill_value=0), 'ms') > pd.to_timedelta(x['value'] + threshold, 'ms')
)
# calculate correct time and concatenate split dataframes
df = pd.DataFrame()
for each in df_split:
start_time = each['time'].min()
each['seconds'] = pd.Series(each['value'].cumsum(), dtype='uint32')
each['seconds'] = each['seconds'].shift(1, fill_value=0)
each['seconds'] = pd.to_timedelta(each['seconds'], unit='ms')
each['time'] = each['seconds'] + start_time
each.drop(['seconds'], axis='columns', inplace=True)
df = pd.concat([df, each])
df.reset_index(drop=True, inplace=True)
return df[['tag', 'count', 'time']]
@classmethod
def create_time_lookup(cls, path, tag):
if isinstance(tag, int):
tag = [tag]
elif not isinstance(tag, list):
raise TypeError(f'Expected tag to be int or list, but got {type(tag)}')
includes_interbeat_interval = 14 in tag
if includes_interbeat_interval:
tag = [value for value in tag if value != 14]
df = pd.DataFrame(columns=['tag', 'count', 'time'])
if tag:
df_iterator = cls.get_dataframe_iterator(path, ['tag', 'count', 'time'])
# append data from csv in chunks and drop duplicates
for chunk in df_iterator:
chunk.drop_duplicates(subset=['count', 'tag'], inplace=True)
subset = chunk['tag'].isin(tag)
df = | pd.concat([df, chunk[subset]], sort=False) | pandas.concat |
import itertools
import warnings
from typing import Callable
from typing import Optional
import numpy as np
import pandas as pd
from sid.shared import boolean_choices
from sid.validation import validate_return_is_series_or_ndarray
def perform_rapid_tests(
date: pd.Timestamp,
states: pd.DataFrame,
params: pd.DataFrame,
rapid_test_models: Optional[Callable],
contacts: pd.DataFrame,
seed: itertools.count,
) -> pd.DataFrame:
"""Perform testing with rapid tests."""
if rapid_test_models:
receives_rapid_test = _compute_who_receives_rapid_tests(
date=date,
states=states,
params=params,
rapid_test_models=rapid_test_models,
contacts=contacts,
seed=seed,
)
is_tested_positive = _sample_test_outcome(
states, receives_rapid_test, params, seed
)
states = _update_states_with_rapid_tests_outcomes(
states, receives_rapid_test, is_tested_positive
)
return states
def apply_reactions_to_rapid_tests(
date,
states,
params,
rapid_test_reaction_models,
contacts,
seed,
):
"""Apply reactions to rapid_tests."""
if rapid_test_reaction_models:
for model in rapid_test_reaction_models.values():
loc = model.get("loc", params.index)
func = model["model"]
if model["start"] <= date <= model["end"]:
contacts = func(
contacts=contacts,
states=states,
params=params.loc[loc],
seed=next(seed),
)
return contacts
def _compute_who_receives_rapid_tests(
date, states, params, rapid_test_models, contacts, seed
):
"""Compute who receives rapid tests.
We loop over all rapid tests models and collect newly allocated rapid tests in
``receives_rapid_test``. A copy of the series is passed to each rapid test model so
that the user is aware of who is tested, but cannot alter the existing assignment.
"""
receives_rapid_test = pd.Series(index=states.index, data=False)
for name, model in rapid_test_models.items():
loc = model.get("loc", params.index)
func = model["model"]
if model["start"] <= date <= model["end"]:
new_receives_rapid_test = func(
receives_rapid_test=receives_rapid_test.copy(deep=True),
states=states,
params=params.loc[loc],
contacts=contacts,
seed=next(seed),
)
new_receives_rapid_test = validate_return_is_series_or_ndarray(
new_receives_rapid_test, name, "rapid_test_models", states.index
)
receives_rapid_test.loc[new_receives_rapid_test] = True
return receives_rapid_test
def _sample_test_outcome(states, receives_rapid_test, params, seed):
"""Sample the outcomes of the rapid tests.
For those who are infectious, sensitivity gives us the probability that they are
also tested positive.
For those who are not infectious, 1 - specificity gives us the probability that they
are falsely tested positive.
"""
np.random.seed(next(seed))
is_tested_positive = pd.Series(index=states.index, data=False)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="indexing past lexsort depth may impact performance."
)
sensitivity_params = params.loc[("rapid_test", "sensitivity"), "value"]
infected = states["cd_infectious_true"] >= -10
receives_test_and_is_infected = infected & receives_rapid_test
sensitivity = _create_sensitivity(
states=states[receives_test_and_is_infected],
sensitivity_params=sensitivity_params,
)
is_truly_positive = boolean_choices(sensitivity)
is_tested_positive.loc[receives_test_and_is_infected] = is_truly_positive
specificity = params.loc[("rapid_test", "specificity", "specificity"), "value"]
uninfected_test_receivers = ~infected & receives_rapid_test
p_false_positive = np.full(uninfected_test_receivers.sum(), 1 - specificity)
is_falsely_positive = boolean_choices(p_false_positive)
is_tested_positive.loc[uninfected_test_receivers] = is_falsely_positive
return is_tested_positive
def _create_sensitivity(states, sensitivity_params):
"""Create the sensitivity se"""
sensitivity = | pd.Series(np.nan, index=states.index) | pandas.Series |
import json
import requests
import pandas as pd
def get_collection(code):
url = 'http://sweetgum.nybg.org/science/api/v1/institutions/' + code
collection = requests.get(url)
if collection.status_code == 200:
collections = json.loads(collection.text)
collections = {'code' : collections['code'], 'name' : collections['organization'], 'website' : collections['contact']['webUrl']}
df = | pd.DataFrame(collections, index=[0]) | pandas.DataFrame |
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
| assert_series_equal(expect_out, actual_out, check_names=False) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/12/13 17:39
describe: 事件性能分析
"""
import os
import os.path
import traceback
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
from tqdm import tqdm
from typing import Callable, List
from czsc.objects import Factor
from czsc.data.ts_cache import TsDataCache
from czsc.sensors.utils import generate_signals
from czsc.utils import io
from czsc.utils import WordWriter
plt.style.use('ggplot')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
class FactorsSensor:
"""因子(Factor)感应器:分析各种信号和因子的表现"""
def __init__(self,
results_path: str,
sdt: str,
edt: str,
dc: TsDataCache,
base_freq: str,
freqs: List[str],
get_signals: Callable,
get_factors: Callable):
self.name = self.__class__.__name__
self.version = "V20211213"
os.makedirs(results_path, exist_ok=True)
self.results_path = results_path
self.sdt = sdt
self.edt = edt
self.get_signals = get_signals
self.get_factors = get_factors
self.factors: List[Factor] = get_factors()
self.base_freq = base_freq
self.freqs = freqs
self.file_docx = os.path.join(results_path, f'factors_sensor_{sdt}_{edt}.docx')
self.writer = WordWriter(self.file_docx)
self.dc = dc
self.betas = ['000001.SH', '000016.SH', '000905.SH', '000300.SH', '399001.SZ', '399006.SZ']
self.file_sf = os.path.join(results_path, f'factors_{sdt}_{edt}.pkl')
self.signals_path = os.path.join(results_path, 'signals')
os.makedirs(self.signals_path, exist_ok=True)
if os.path.exists(self.file_sf):
self.sf = io.read_pkl(self.file_sf)
else:
self.sf = self.get_stock_factors()
io.save_pkl(self.sf, self.file_sf)
def get_share_factors(self, ts_code: str, name: str):
"""获取单个标的因子信息"""
dc = self.dc
sdt = self.sdt
edt = self.edt
factors = self.factors
start_date = pd.to_datetime(self.sdt) - timedelta(days=3000)
bars = dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=edt, freq='D', asset="E", raw_bar=True)
n_bars = dc.pro_bar(ts_code=ts_code, start_date=sdt, end_date=edt, freq='D', asset="E", raw_bar=False)
nb_dicts = {row['trade_date'].strftime("%Y%m%d"): row for row in n_bars.to_dict("records")}
signals = generate_signals(bars, sdt, self.base_freq, self.freqs, self.get_signals)
results = []
for s in signals:
row = {'name': name, 'ts_code': ts_code}
for factor in factors:
row[factor.name] = factor.is_match(s)
nb_info = nb_dicts.get(s['dt'].strftime("%Y%m%d"), None)
row.update(nb_info)
results.append(row)
df_res = | pd.DataFrame(results) | pandas.DataFrame |
import heapq
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
def calculate_rating(a, b, c, b1, c1):
rating = np.mean(a)
user1 = calculate_consin(a, b) * (b1 - rating)
user2 = calculate_consin(a, c) * (c1 - rating)
rating = rating + ((user1 + user2) / (calculate_consin(a, b) + calculate_consin(a, c)))
return rating
def calculate_consin(a, b):
temp = np.linalg.norm(a) * np.linalg.norm(b)
if temp == 0:
return 0
else:
return np.dot(a, b) / temp
def data_preprocess(path):
df = pd.read_csv(path)
users = dict()
for row in df.iterrows():
if row[1]['userid'] not in users:
users[row[1]['userid']] = dict()
if row[1]['song_id'] not in users[row[1]['userid']]:
users[row[1]['userid']][row[1]['song_id']] = row[1]['song_score']
return pd.DataFrame(users).T, users.keys()
def recommend(path, user_id):
df, users = data_preprocess(path)
df = df.fillna(0)
songs = df.columns.values
users = list(users)
data = df.to_numpy()
estimator = KMeans(n_clusters=5)
results = estimator.fit(data)
labels = results.labels_
index = users.index(user_id)
flag = labels[index]
similarity = list()
for i, value in enumerate(labels):
if i == index or flag != labels[index]:
continue
else:
temp = calculate_consin(data[index], data[i])
if temp == 1:
similarity.append(0)
else:
similarity.append(temp)
max_num_index_list = map(similarity.index, heapq.nlargest(2, similarity))
max_num_index_list = list(max_num_index_list)
candidates = list()
for col in range(len(songs)):
if data[max_num_index_list[0]][col] != 0 or data[max_num_index_list[1]][col] != 0 and data[index][col] == 0:
candidates.append(col)
ratings = list()
lut = list()
for i, song_index in enumerate(candidates):
lut.append(song_index)
ratings.append(calculate_rating(data[index], data[max_num_index_list[0]], data[max_num_index_list[1]]
, data[max_num_index_list[0]][song_index],
data[max_num_index_list[1]][song_index]))
recommend_song_list = map(ratings.index, heapq.nlargest(5, ratings))
recommend_song_list = set(recommend_song_list)
recommend_list = list()
for song_index in recommend_song_list:
recommend_list.append(songs[lut[song_index]])
df = | pd.read_csv(path) | pandas.read_csv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.